summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.bp2
-rw-r--r--build/Android.bp7
-rw-r--r--build/Android.gtest.mk36
-rw-r--r--build/art.go39
-rw-r--r--cmdline/cmdline.h5
-rw-r--r--compiler/Android.bp12
-rw-r--r--compiler/cfi_test.h2
-rw-r--r--compiler/compiled_method.cc28
-rw-r--r--compiler/debug/dwarf/dwarf_test.h3
-rw-r--r--compiler/debug/elf_debug_frame_writer.h16
-rw-r--r--compiler/debug/elf_debug_line_writer.h16
-rw-r--r--compiler/debug/elf_debug_loc_writer.h34
-rw-r--r--compiler/debug/elf_symtab_writer.h2
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc4
-rw-r--r--compiler/dex/inline_method_analyser.cc3
-rw-r--r--compiler/dex/verified_method.cc7
-rw-r--r--compiler/driver/compiled_method_storage_test.cc4
-rw-r--r--compiler/driver/compiler_driver.cc90
-rw-r--r--compiler/driver/compiler_driver.h5
-rw-r--r--compiler/driver/compiler_options.cc2
-rw-r--r--compiler/driver/compiler_options.h12
-rw-r--r--compiler/driver/compiler_options_map-inl.h10
-rw-r--r--compiler/driver/compiler_options_map.def2
-rw-r--r--compiler/exception_test.cc5
-rw-r--r--compiler/jni/jni_cfi_test.cc14
-rw-r--r--compiler/jni/quick/calling_convention.cc28
-rw-r--r--compiler/jni/quick/jni_compiler.cc5
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2.cc2
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2_test.cc14
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc13
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64_test.cc15
-rw-r--r--compiler/linker/elf_builder.h29
-rw-r--r--compiler/linker/mips/relative_patcher_mips32r6_test.cc2
-rw-r--r--compiler/linker/mips/relative_patcher_mips_test.cc2
-rw-r--r--compiler/linker/mips64/relative_patcher_mips64_test.cc2
-rw-r--r--compiler/linker/relative_patcher.cc14
-rw-r--r--compiler/linker/x86/relative_patcher_x86_test.cc2
-rw-r--r--compiler/linker/x86_64/relative_patcher_x86_64_test.cc2
-rw-r--r--compiler/optimizing/code_generator.cc373
-rw-r--r--compiler/optimizing/code_generator.h91
-rw-r--r--compiler/optimizing/code_generator_arm64.cc59
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc71
-rw-r--r--compiler/optimizing/code_generator_mips.cc125
-rw-r--r--compiler/optimizing/code_generator_mips.h4
-rw-r--r--compiler/optimizing/code_generator_mips64.cc104
-rw-r--r--compiler/optimizing/code_generator_vector_mips.cc174
-rw-r--r--compiler/optimizing/code_generator_vector_mips64.cc168
-rw-r--r--compiler/optimizing/code_generator_x86.cc81
-rw-r--r--compiler/optimizing/code_generator_x86.h4
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc82
-rw-r--r--compiler/optimizing/code_generator_x86_64.h4
-rw-r--r--compiler/optimizing/codegen_test.cc14
-rw-r--r--compiler/optimizing/codegen_test_utils.h4
-rw-r--r--compiler/optimizing/common_arm64.h2
-rw-r--r--compiler/optimizing/data_type.h2
-rw-r--r--compiler/optimizing/emit_swap_mips_test.cc32
-rw-r--r--compiler/optimizing/graph_visualizer.cc6
-rw-r--r--compiler/optimizing/instruction_builder.cc14
-rw-r--r--compiler/optimizing/instruction_simplifier.cc18
-rw-r--r--compiler/optimizing/instruction_simplifier_arm.cc6
-rw-r--r--compiler/optimizing/instruction_simplifier_arm64.cc6
-rw-r--r--compiler/optimizing/instruction_simplifier_shared.cc8
-rw-r--r--compiler/optimizing/instruction_simplifier_shared.h3
-rw-r--r--compiler/optimizing/intrinsics.h81
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc28
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc24
-rw-r--r--compiler/optimizing/intrinsics_mips.cc49
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc44
-rw-r--r--compiler/optimizing/intrinsics_x86.cc23
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc23
-rw-r--r--compiler/optimizing/load_store_analysis.h6
-rw-r--r--compiler/optimizing/load_store_analysis_test.cc64
-rw-r--r--compiler/optimizing/load_store_elimination.cc23
-rw-r--r--compiler/optimizing/loop_optimization.cc336
-rw-r--r--compiler/optimizing/loop_optimization.h34
-rw-r--r--compiler/optimizing/nodes.h34
-rw-r--r--compiler/optimizing/nodes_shared.h32
-rw-r--r--compiler/optimizing/nodes_vector.h16
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc31
-rw-r--r--compiler/optimizing/optimizing_cfi_test_expected.inc12
-rw-r--r--compiler/optimizing/optimizing_compiler.cc56
-rw-r--r--compiler/optimizing/reference_type_propagation.cc2
-rw-r--r--compiler/optimizing/register_allocation_resolver.cc1
-rw-r--r--compiler/optimizing/register_allocator.cc14
-rw-r--r--compiler/optimizing/scheduler.cc6
-rw-r--r--compiler/optimizing/scheduler_test.cc12
-rw-r--r--compiler/optimizing/stack_map_stream.cc34
-rw-r--r--compiler/optimizing/stack_map_stream.h35
-rw-r--r--compiler/optimizing/stack_map_test.cc60
-rw-r--r--compiler/trampolines/trampoline_compiler.cc14
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.h2
-rw-r--r--compiler/utils/arm64/jni_macro_assembler_arm64.h2
-rw-r--r--compiler/utils/assembler_test.h18
-rw-r--r--compiler/utils/assembler_thumb_test.cc6
-rw-r--r--compiler/utils/jni_macro_assembler.cc14
-rw-r--r--compiler/utils/mips/assembler_mips.cc222
-rw-r--r--compiler/utils/mips/assembler_mips.h27
-rw-r--r--compiler/utils/mips/assembler_mips32r6_test.cc126
-rw-r--r--compiler/utils/mips/assembler_mips_test.cc8
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc156
-rw-r--r--compiler/utils/mips64/assembler_mips64.h30
-rw-r--r--compiler/utils/mips64/assembler_mips64_test.cc145
-rw-r--r--compiler/utils/x86/jni_macro_assembler_x86.h2
-rw-r--r--compiler/utils/x86_64/jni_macro_assembler_x86_64.h2
-rw-r--r--dex2oat/Android.bp7
-rw-r--r--dex2oat/dex2oat.cc34
-rw-r--r--dex2oat/dex2oat_image_test.cc2
-rw-r--r--dex2oat/dex2oat_options.cc7
-rw-r--r--dex2oat/dex2oat_options.def1
-rw-r--r--dex2oat/dex2oat_options.h1
-rw-r--r--dex2oat/dex2oat_test.cc144
-rw-r--r--dex2oat/linker/elf_writer_quick.cc3
-rw-r--r--dex2oat/linker/elf_writer_test.cc1
-rw-r--r--dex2oat/linker/image_test.h4
-rw-r--r--dex2oat/linker/oat_writer.cc27
-rw-r--r--dex2oat/linker/oat_writer.h9
-rw-r--r--dex2oat/linker/oat_writer_test.cc19
-rw-r--r--dexdump/Android.bp21
-rw-r--r--dexdump/dexdump.cc3
-rw-r--r--dexlayout/Android.bp6
-rw-r--r--dexlayout/compact_dex_writer.cc53
-rw-r--r--dexlayout/compact_dex_writer.h43
-rw-r--r--dexlayout/dex_ir.cc40
-rw-r--r--dexlayout/dex_ir.h10
-rw-r--r--dexlayout/dex_ir_builder.cc1
-rw-r--r--dexlayout/dex_writer.cc77
-rw-r--r--dexlayout/dex_writer.h12
-rw-r--r--dexlayout/dexdiag_test.cc1
-rw-r--r--dexlayout/dexlayout.cc128
-rw-r--r--dexlayout/dexlayout.h7
-rw-r--r--dexlayout/dexlayout_test.cc87
-rw-r--r--dexlist/dexlist.cc3
-rw-r--r--dexoptanalyzer/dexoptanalyzer.cc56
-rw-r--r--disassembler/disassembler.cc12
-rw-r--r--disassembler/disassembler_mips.cc33
-rw-r--r--oatdump/oatdump.cc24
-rw-r--r--oatdump/oatdump_test.h1
-rw-r--r--openjdkjvm/Android.bp4
-rw-r--r--openjdkjvm/OpenjdkJvm.cc4
-rw-r--r--openjdkjvmti/Android.bp7
-rw-r--r--openjdkjvmti/OpenjdkJvmTi.cc6
-rw-r--r--openjdkjvmti/art_jvmti.h13
-rw-r--r--openjdkjvmti/deopt_manager.cc322
-rw-r--r--openjdkjvmti/deopt_manager.h168
-rw-r--r--openjdkjvmti/events-inl.h11
-rw-r--r--openjdkjvmti/events.cc57
-rw-r--r--openjdkjvmti/events.h1
-rw-r--r--openjdkjvmti/jvmti_weak_table-inl.h2
-rw-r--r--openjdkjvmti/ti_breakpoint.cc62
-rw-r--r--openjdkjvmti/ti_class.cc2
-rw-r--r--openjdkjvmti/ti_class_loader.cc2
-rw-r--r--openjdkjvmti/ti_field.cc4
-rw-r--r--openjdkjvmti/ti_field.h14
-rw-r--r--openjdkjvmti/ti_method.cc37
-rw-r--r--openjdkjvmti/ti_monitor.cc6
-rw-r--r--openjdkjvmti/ti_phase.cc2
-rw-r--r--openjdkjvmti/ti_properties.cc4
-rw-r--r--openjdkjvmti/ti_redefine.cc2
-rw-r--r--openjdkjvmti/ti_search.cc5
-rw-r--r--openjdkjvmti/ti_stack.cc73
-rw-r--r--openjdkjvmti/ti_thread.cc71
-rw-r--r--openjdkjvmti/ti_thread.h11
-rw-r--r--patchoat/patchoat.cc6
-rw-r--r--profman/profman.cc2
-rw-r--r--runtime/Android.bp21
-rw-r--r--runtime/arch/arm/fault_handler_arm.cc4
-rw-r--r--runtime/arch/arm/instruction_set_features_arm.cc4
-rw-r--r--runtime/arch/arm/instruction_set_features_arm.h2
-rw-r--r--runtime/arch/arm/instruction_set_features_arm_test.cc20
-rw-r--r--runtime/arch/arm64/fault_handler_arm64.cc4
-rw-r--r--runtime/arch/arm64/instruction_set_features_arm64.cc2
-rw-r--r--runtime/arch/arm64/instruction_set_features_arm64.h2
-rw-r--r--runtime/arch/arm64/instruction_set_features_arm64_test.cc20
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S2
-rw-r--r--runtime/arch/instruction_set.cc92
-rw-r--r--runtime/arch/instruction_set.h115
-rw-r--r--runtime/arch/instruction_set_features.cc109
-rw-r--r--runtime/arch/instruction_set_test.cc50
-rw-r--r--runtime/arch/mips/asm_support_mips.S26
-rw-r--r--runtime/arch/mips/asm_support_mips.h2
-rw-r--r--runtime/arch/mips/context_mips.cc15
-rw-r--r--runtime/arch/mips/fault_handler_mips.cc4
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.cc2
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.h2
-rw-r--r--runtime/arch/mips/instruction_set_features_mips_test.cc40
-rw-r--r--runtime/arch/mips/jni_entrypoints_mips.S10
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S216
-rw-r--r--runtime/arch/mips/quick_method_frame_info_mips.h18
-rw-r--r--runtime/arch/mips64/fault_handler_mips64.cc4
-rw-r--r--runtime/arch/mips64/instruction_set_features_mips64.cc2
-rw-r--r--runtime/arch/mips64/instruction_set_features_mips64.h2
-rw-r--r--runtime/arch/mips64/instruction_set_features_mips64_test.cc10
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S2
-rw-r--r--runtime/arch/x86/fault_handler_x86.cc8
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.h2
-rw-r--r--runtime/arch/x86/instruction_set_features_x86_test.cc40
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S2
-rw-r--r--runtime/arch/x86_64/instruction_set_features_x86_64.h2
-rw-r--r--runtime/arch/x86_64/instruction_set_features_x86_64_test.cc4
-rw-r--r--runtime/art_field-inl.h38
-rw-r--r--runtime/art_field.h4
-rw-r--r--runtime/art_method-inl.h1
-rw-r--r--runtime/art_method.cc13
-rw-r--r--runtime/art_method.h2
-rw-r--r--runtime/asm_support.h6
-rw-r--r--runtime/atomic.h2
-rw-r--r--runtime/base/bit_vector-inl.h18
-rw-r--r--runtime/base/bit_vector.h20
-rw-r--r--runtime/base/debug_stack.h10
-rw-r--r--runtime/base/file_magic.cc23
-rw-r--r--runtime/base/file_magic.h3
-rw-r--r--runtime/base/file_utils.cc356
-rw-r--r--runtime/base/file_utils.h86
-rw-r--r--runtime/base/mutex.cc4
-rw-r--r--runtime/base/mutex.h2
-rw-r--r--runtime/base/safe_copy.cc2
-rw-r--r--runtime/base/scoped_arena_allocator.cc33
-rw-r--r--runtime/base/scoped_arena_allocator.h18
-rw-r--r--runtime/cdex/compact_dex_file.cc8
-rw-r--r--runtime/cdex/compact_dex_file.h19
-rw-r--r--runtime/cdex/compact_dex_file_test.cc6
-rw-r--r--runtime/cdex/compact_dex_level.h (renamed from tools/cpp-define-generator/offset_codeitem.def)27
-rw-r--r--runtime/class_linker.cc83
-rw-r--r--runtime/class_linker.h5
-rw-r--r--runtime/class_linker_test.cc43
-rw-r--r--runtime/class_loader_context.cc1
-rw-r--r--runtime/common_dex_operations.h28
-rw-r--r--runtime/common_runtime_test.cc31
-rw-r--r--runtime/common_runtime_test.h4
-rw-r--r--runtime/common_throws.cc5
-rw-r--r--runtime/debugger.cc10
-rw-r--r--runtime/debugger.h1
-rw-r--r--runtime/dex2oat_environment_test.h3
-rw-r--r--runtime/dex_file.cc44
-rw-r--r--runtime/dex_file.h95
-rw-r--r--runtime/dex_file_annotations.cc17
-rw-r--r--runtime/dex_file_layout.cc1
-rw-r--r--runtime/dex_file_loader.cc111
-rw-r--r--runtime/dex_file_loader.h26
-rw-r--r--runtime/dex_file_test.cc12
-rw-r--r--runtime/dex_file_verifier.cc18
-rw-r--r--runtime/dex_file_verifier.h2
-rw-r--r--runtime/dex_file_verifier_test.cc5
-rw-r--r--runtime/dex_instruction.cc20
-rw-r--r--runtime/dex_instruction.h9
-rw-r--r--runtime/dex_instruction_iterator.h216
-rw-r--r--runtime/dex_instruction_test.cc6
-rw-r--r--runtime/entrypoints/quick/callee_save_frame.h58
-rw-r--r--runtime/entrypoints/quick/quick_jni_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc69
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc30
-rw-r--r--runtime/gc/collector/concurrent_copying.cc9
-rw-r--r--runtime/gc/collector/mark_sweep.cc1
-rw-r--r--runtime/gc/heap.cc10
-rw-r--r--runtime/gc/heap.h4
-rw-r--r--runtime/gc/reference_processor.cc2
-rw-r--r--runtime/gc/space/image_space.cc1
-rw-r--r--runtime/gc/space/image_space_fs.h3
-rw-r--r--runtime/gc/verification.cc1
-rw-r--r--runtime/generated/asm_support_gen.h30
-rw-r--r--runtime/instrumentation.cc9
-rw-r--r--runtime/interpreter/interpreter.cc13
-rw-r--r--runtime/interpreter/interpreter_common.cc450
-rw-r--r--runtime/interpreter/interpreter_common.h13
-rw-r--r--runtime/interpreter/interpreter_intrinsics.cc108
-rw-r--r--runtime/interpreter/interpreter_mterp_impl.h2
-rw-r--r--runtime/interpreter/mterp/arm/entry.S9
-rw-r--r--runtime/interpreter/mterp/arm/footer.S5
-rw-r--r--runtime/interpreter/mterp/arm/header.S5
-rw-r--r--runtime/interpreter/mterp/arm/op_aget_wide.S2
-rw-r--r--runtime/interpreter/mterp/arm64/close_cfi.S4
-rw-r--r--runtime/interpreter/mterp/arm64/entry.S14
-rw-r--r--runtime/interpreter/mterp/arm64/footer.S8
-rw-r--r--runtime/interpreter/mterp/arm64/header.S19
-rw-r--r--runtime/interpreter/mterp/config_arm643
-rwxr-xr-xruntime/interpreter/mterp/gen_mterp.py6
-rw-r--r--runtime/interpreter/mterp/mips/entry.S9
-rw-r--r--runtime/interpreter/mterp/mips/footer.S6
-rw-r--r--runtime/interpreter/mterp/mips/header.S5
-rw-r--r--runtime/interpreter/mterp/mips64/entry.S9
-rw-r--r--runtime/interpreter/mterp/mips64/footer.S5
-rw-r--r--runtime/interpreter/mterp/mips64/header.S2
-rw-r--r--runtime/interpreter/mterp/mterp.cc2
-rw-r--r--runtime/interpreter/mterp/mterp_stub.cc6
-rw-r--r--runtime/interpreter/mterp/out/mterp_arm.S28
-rw-r--r--runtime/interpreter/mterp/out/mterp_arm64.S54
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips.S27
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips64.S23
-rw-r--r--runtime/interpreter/mterp/out/mterp_x86.S19
-rw-r--r--runtime/interpreter/mterp/out/mterp_x86_64.S19
-rw-r--r--runtime/interpreter/mterp/x86/entry.S5
-rw-r--r--runtime/interpreter/mterp/x86/footer.S5
-rw-r--r--runtime/interpreter/mterp/x86/header.S2
-rw-r--r--runtime/interpreter/mterp/x86_64/entry.S5
-rw-r--r--runtime/interpreter/mterp/x86_64/footer.S5
-rw-r--r--runtime/interpreter/mterp/x86_64/header.S2
-rw-r--r--runtime/interpreter/shadow_frame.h19
-rw-r--r--runtime/interpreter/unstarted_runtime.cc14
-rw-r--r--runtime/interpreter/unstarted_runtime_list.h1
-rw-r--r--runtime/interpreter/unstarted_runtime_test.cc19
-rw-r--r--runtime/intrinsics_enum.h (renamed from compiler/intrinsics_enum.h)6
-rw-r--r--runtime/intrinsics_list.h (renamed from compiler/intrinsics_list.h)82
-rw-r--r--runtime/invoke_type.h13
-rw-r--r--runtime/java_vm_ext.cc4
-rw-r--r--runtime/jit/jit.cc7
-rw-r--r--runtime/jit/jit_code_cache.cc4
-rw-r--r--runtime/jit/profile_compilation_info.cc1
-rw-r--r--runtime/jit/profiling_info.cc6
-rw-r--r--runtime/jni_internal.cc2
-rw-r--r--runtime/jni_internal_test.cc2
-rw-r--r--runtime/mem_map.cc1
-rw-r--r--runtime/method_handles.cc293
-rw-r--r--runtime/method_handles.h25
-rw-r--r--runtime/mirror/emulated_stack_frame.cc2
-rw-r--r--runtime/mirror/field-inl.h2
-rw-r--r--runtime/mirror/object.cc6
-rw-r--r--runtime/mirror/var_handle.cc399
-rw-r--r--runtime/mirror/var_handle.h255
-rw-r--r--runtime/mirror/var_handle_test.cc991
-rw-r--r--runtime/monitor.cc19
-rw-r--r--runtime/native/dalvik_system_DexFile.cc41
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc4
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc33
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc9
-rw-r--r--runtime/native/java_lang_Class.cc4
-rw-r--r--runtime/native/java_lang_String.cc2
-rw-r--r--runtime/native/java_lang_StringFactory.cc4
-rw-r--r--runtime/native/java_lang_Thread.cc2
-rw-r--r--runtime/native/java_lang_VMClassLoader.cc4
-rw-r--r--runtime/native/libcore_util_CharsetUtils.cc2
-rw-r--r--runtime/native/native_util.h2
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc2
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc5
-rw-r--r--runtime/native_stack_dump.cc1
-rw-r--r--runtime/non_debuggable_classes.cc2
-rw-r--r--runtime/oat.cc2
-rw-r--r--runtime/oat_file.cc1
-rw-r--r--runtime/oat_file_assistant.cc124
-rw-r--r--runtime/oat_file_assistant.h26
-rw-r--r--runtime/oat_file_assistant_test.cc100
-rw-r--r--runtime/oat_file_manager.cc9
-rw-r--r--runtime/oat_quick_method_header.h8
-rw-r--r--runtime/parsed_options.cc3
-rw-r--r--runtime/prebuilt_tools_test.cc2
-rw-r--r--runtime/proxy_test.cc4
-rw-r--r--runtime/reflection.cc2
-rw-r--r--runtime/reflection_test.cc2
-rw-r--r--runtime/runtime.cc62
-rw-r--r--runtime/runtime_callbacks.cc11
-rw-r--r--runtime/runtime_callbacks.h10
-rw-r--r--runtime/runtime_callbacks_test.cc2
-rw-r--r--runtime/runtime_common.cc1
-rw-r--r--runtime/runtime_intrinsics.cc84
-rw-r--r--runtime/runtime_intrinsics.h (renamed from tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java)30
-rw-r--r--runtime/signal_catcher.cc1
-rw-r--r--runtime/standard_dex_file.h13
-rw-r--r--runtime/thread.cc76
-rw-r--r--runtime/thread.h9
-rw-r--r--runtime/thread_list.cc11
-rw-r--r--runtime/trace.cc2
-rw-r--r--runtime/utils.cc306
-rw-r--r--runtime/utils.h63
-rw-r--r--runtime/utils_test.cc3
-rw-r--r--runtime/verifier/method_verifier.cc52
-rw-r--r--runtime/verifier/reg_type_cache.cc5
-rw-r--r--runtime/verifier/reg_type_cache.h2
-rw-r--r--runtime/well_known_classes.cc6
-rw-r--r--runtime/well_known_classes.h1
-rw-r--r--simulator/code_simulator.cc2
-rw-r--r--simulator/code_simulator_arm64.h3
-rw-r--r--test/044-proxy/expected.txt1
-rw-r--r--test/044-proxy/run18
-rw-r--r--test/044-proxy/src/Main.java1
-rw-r--r--test/044-proxy/src/OOMEOnDispatch.java74
-rw-r--r--test/137-cfi/cfi.cc1
-rw-r--r--test/203-multi-checkpoint/expected.txt5
-rw-r--r--test/203-multi-checkpoint/info.txt4
-rw-r--r--test/203-multi-checkpoint/multi_checkpoint.cc90
-rw-r--r--test/203-multi-checkpoint/src/Main.java59
-rw-r--r--test/458-checker-instruct-simplification/src/Main.java84
-rw-r--r--test/484-checker-register-hints/smali/Smali.smali143
-rw-r--r--test/484-checker-register-hints/src/Main.java12
-rwxr-xr-xtest/593-checker-boolean-2-integral-conv/build3
-rw-r--r--test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali137
-rw-r--r--test/593-checker-boolean-2-integral-conv/src/Main.java70
-rw-r--r--test/623-checker-loop-regressions/src/Main.java51
-rw-r--r--test/640-checker-boolean-simd/src/Main.java48
-rw-r--r--test/640-checker-byte-simd/src/Main.java90
-rw-r--r--test/640-checker-char-simd/src/Main.java90
-rw-r--r--test/640-checker-double-simd/src/Main.java57
-rw-r--r--test/640-checker-float-simd/src/Main.java63
-rw-r--r--test/640-checker-int-simd/src/Main.java184
-rw-r--r--test/640-checker-long-simd/src/Main.java136
-rw-r--r--test/640-checker-short-simd/src/Main.java90
-rw-r--r--test/645-checker-abs-simd/src/Main.java109
-rw-r--r--test/646-checker-hadd-alt-byte/src/Main.java177
-rwxr-xr-x[-rw-r--r--]test/646-checker-hadd-alt-char/build (renamed from test/1929-exception-catch-exception/build)0
-rw-r--r--test/646-checker-hadd-alt-char/src/Main.java136
-rwxr-xr-x[-rw-r--r--]test/646-checker-hadd-alt-short/build (renamed from test/484-checker-register-hints/build)0
-rw-r--r--test/646-checker-hadd-alt-short/src/Main.java177
-rw-r--r--test/646-checker-hadd-byte/src/Main.java177
-rwxr-xr-x[-rw-r--r--]test/646-checker-hadd-char/build (renamed from test/611-checker-simplify-if/build)0
-rw-r--r--test/646-checker-hadd-char/src/Main.java136
-rwxr-xr-xtest/646-checker-hadd-short/build20
-rw-r--r--test/646-checker-hadd-short/src/Main.java290
-rw-r--r--test/651-checker-byte-simd-minmax/src/Main.java127
-rw-r--r--test/651-checker-char-simd-minmax/src/Main.java53
-rw-r--r--test/651-checker-double-simd-minmax/src/Main.java18
-rw-r--r--test/651-checker-float-simd-minmax/src/Main.java18
-rw-r--r--test/651-checker-int-simd-minmax/src/Main.java46
-rw-r--r--test/651-checker-long-simd-minmax/src/Main.java18
-rwxr-xr-xtest/651-checker-short-simd-minmax/build20
-rw-r--r--test/651-checker-short-simd-minmax/src/Main.java127
-rw-r--r--test/660-checker-simd-sad-int/src/Main.java64
-rwxr-xr-xtest/660-checker-simd-sad-short2/build20
-rw-r--r--test/661-checker-simd-reduc/src/Main.java409
-rw-r--r--test/665-checker-simd-zero/src/Main.java48
-rw-r--r--test/667-checker-simd-alignment/expected.txt1
-rw-r--r--test/667-checker-simd-alignment/info.txt1
-rw-r--r--test/667-checker-simd-alignment/src/Main.java337
-rw-r--r--test/668-aiobe/expected.txt0
-rw-r--r--test/668-aiobe/info.txt2
-rw-r--r--test/668-aiobe/smali/TestCase.smali30
-rw-r--r--test/668-aiobe/src/Main.java36
-rw-r--r--test/706-checker-scheduler/src/Main.java77
-rw-r--r--test/910-methods/check10
-rw-r--r--test/911-get-stack-trace/check14
-rw-r--r--test/911-get-stack-trace/expected_d8.diff456
-rwxr-xr-xtest/988-method-trace/gen_srcs.py9
-rw-r--r--test/988-method-trace/src/art/Test988Intrinsics.java2
-rw-r--r--test/992-source-data/expected.txt12
-rw-r--r--test/992-source-data/source_file.cc13
-rw-r--r--test/992-source-data/src/art/Test992.java21
-rw-r--r--test/993-breakpoints/breakpoints.cc51
-rw-r--r--test/993-breakpoints/expected.txt101
-rw-r--r--test/993-breakpoints/src/art/Test993.java178
-rw-r--r--test/Android.bp1
-rwxr-xr-xtest/etc/run-test-jar10
-rwxr-xr-xtest/run-test6
-rw-r--r--test/testrunner/env.py12
-rwxr-xr-xtest/testrunner/run_build_test_target.py1
-rwxr-xr-xtest/testrunner/testrunner.py14
-rw-r--r--tools/ahat/Android.mk70
-rw-r--r--tools/ahat/etc/ahat_api.txt327
-rw-r--r--tools/ahat/etc/ahat_api_msg.txt5
-rw-r--r--tools/ahat/etc/ahat_removed_api.txt0
-rw-r--r--tools/ahat/src/main/com/android/ahat/HtmlDoc.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/HtmlEscaper.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/Main.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java6
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java87
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Parser.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Reference.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/RootType.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Site.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java10
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Type.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Value.java20
-rw-r--r--tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java2
-rw-r--r--tools/ahat/src/test-dump/Main.java11
-rw-r--r--tools/ahat/src/test/com/android/ahat/InstanceTest.java49
-rw-r--r--tools/breakpoint-logger/Android.bp66
-rw-r--r--tools/breakpoint-logger/README.md54
-rw-r--r--tools/breakpoint-logger/breakpoint_logger.cc447
-rwxr-xr-xtools/buildbot-build.sh2
-rw-r--r--tools/cpp-define-generator/offsets_all.def1
-rwxr-xr-xtools/golem/build-target.sh12
-rw-r--r--tools/libjdwp_art_failures.txt7
-rw-r--r--tools/libjdwp_oj_art_failures.txt65
-rwxr-xr-xtools/run-jdwp-tests.sh2
-rwxr-xr-xtools/run-libcore-tests.sh2
-rwxr-xr-xtools/run-libjdwp-tests.sh80
478 files changed, 13716 insertions, 5130 deletions
diff --git a/Android.bp b/Android.bp
index 569179dc11..295ae4c556 100644
--- a/Android.bp
+++ b/Android.bp
@@ -12,6 +12,7 @@ art_static_dependencies = [
"libcutils",
"libunwindbacktrace",
"libunwind",
+ "libunwindstack",
"libutils",
"libbase",
"liblz4",
@@ -40,6 +41,7 @@ subdirs = [
"sigchainlib",
"simulator",
"test",
+ "tools/breakpoint-logger",
"tools/cpp-define-generator",
"tools/dmtracedump",
"tools/titrace",
diff --git a/build/Android.bp b/build/Android.bp
index 2c959d46f5..2a5598fb7a 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -4,6 +4,7 @@ bootstrap_go_package {
deps: [
"blueprint",
"blueprint-pathtools",
+ "blueprint-proptools",
"soong",
"soong-android",
"soong-cc",
@@ -126,15 +127,9 @@ art_global_defaults {
},
include_dirs: [
- "external/icu/icu4c/source/common",
- "external/lz4/lib",
"external/valgrind/include",
"external/valgrind",
"external/vixl/src",
- "external/zlib",
- ],
- header_libs: [
- "jni_platform_headers",
],
tidy_checks: [
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 0f92a25366..7769aad1df 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -100,7 +100,7 @@ $(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMul
$(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
# Dex file dependencies for each gtest.
-ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
+ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary MyClassNatives Nested VerifierDeps VerifierDepsMulti
ART_GTEST_atomic_dex_ref_map_test_DEX_DEPS := Interfaces
ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
@@ -110,7 +110,7 @@ ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods Prof
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
@@ -362,11 +362,12 @@ $$(gtest_rule): test-art-target-sync
$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID
$(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
- $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \
- && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \
- && $$(call ART_TEST_PASSED,$$@)) \
- || $$(call ART_TEST_FAILED,$$@))
+ (adb shell "env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
+ ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) $$(PRIVATE_TARGET_EXE) \
+ && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \
+ && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \
+ && $$(call ART_TEST_PASSED,$$@)) \
+ || $$(call ART_TEST_FAILED,$$@))
$(hide) rm -f /tmp/$$@-$$$$PPID
ART_TEST_TARGET_GTEST$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule)
@@ -379,17 +380,20 @@ valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-syn
$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID
$(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
- valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
- --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
- --num-callers=50 --show-mismatched-frees=no \
- $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \
- && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \
- && $$(call ART_TEST_PASSED,$$@)) \
- || $$(call ART_TEST_FAILED,$$@))
+ (adb shell "env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
+ ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+ $$$$ANDROID_ROOT/bin/valgrind \
+ --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
+ --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
+ --num-callers=50 --show-mismatched-frees=no $$(PRIVATE_TARGET_EXE) \
+ && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \
+ && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \
+ && $$(call ART_TEST_PASSED,$$@)) \
+ || $$(call ART_TEST_FAILED,$$@))
$(hide) rm -f /tmp/$$@-$$$$PPID
- ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += valgrind-$$(gtest_rule)
+ ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += \
+ valgrind-$$(gtest_rule)
ART_TEST_TARGET_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule)
ART_TEST_TARGET_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule)
diff --git a/build/art.go b/build/art.go
index 1bcaf51a1d..4e48d2d932 100644
--- a/build/art.go
+++ b/build/art.go
@@ -19,6 +19,8 @@ import (
"android/soong/cc"
"fmt"
"sync"
+
+ "github.com/google/blueprint/proptools"
)
var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"}
@@ -97,6 +99,11 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
asflags = append(asflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
}
+ if envTrue(ctx, "ART_MIPS32_CHECK_ALIGNMENT") {
+ // Enable the use of MIPS32 CHECK_ALIGNMENT macro for debugging purposes
+ asflags = append(asflags, "-DART_MIPS32_CHECK_ALIGNMENT")
+ }
+
return cflags, asflags
}
@@ -205,31 +212,33 @@ func debugDefaults(ctx android.LoadHookContext) {
func customLinker(ctx android.LoadHookContext) {
linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "")
- if linker != "" {
- type props struct {
- DynamicLinker string
- }
+ type props struct {
+ DynamicLinker string
+ }
- p := &props{}
+ p := &props{}
+ if linker != "" {
p.DynamicLinker = linker
- ctx.AppendProperties(p)
}
+
+ ctx.AppendProperties(p)
}
func prefer32Bit(ctx android.LoadHookContext) {
- if envTrue(ctx, "HOST_PREFER_32_BIT") {
- type props struct {
- Target struct {
- Host struct {
- Compile_multilib string
- }
+ type props struct {
+ Target struct {
+ Host struct {
+ Compile_multilib *string
}
}
+ }
- p := &props{}
- p.Target.Host.Compile_multilib = "prefer32"
- ctx.AppendProperties(p)
+ p := &props{}
+ if envTrue(ctx, "HOST_PREFER_32_BIT") {
+ p.Target.Host.Compile_multilib = proptools.StringPtr("prefer32")
}
+
+ ctx.AppendProperties(p)
}
func testMap(config android.Config) map[string][]string {
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index c5d3a6b454..60dfdce03b 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -26,6 +26,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stringpiece.h"
#include "noop_compiler_callbacks.h"
@@ -147,7 +148,7 @@ struct CmdlineArgs {
} else if (option.starts_with("--instruction-set=")) {
StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
instruction_set_ = GetInstructionSetFromString(instruction_set_str.data());
- if (instruction_set_ == kNone) {
+ if (instruction_set_ == InstructionSet::kNone) {
fprintf(stderr, "Unsupported instruction set %s\n", instruction_set_str.data());
PrintUsage();
return false;
@@ -262,7 +263,7 @@ struct CmdlineArgs {
DBG_LOG << "boot_image_location parent_dir_name was " << parent_dir_name;
- if (GetInstructionSetFromString(parent_dir_name.c_str()) != kNone) {
+ if (GetInstructionSetFromString(parent_dir_name.c_str()) != InstructionSet::kNone) {
*error_msg = "Do not specify the architecture as part of the boot image location";
return false;
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index c2984e1743..859947108e 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -191,11 +191,11 @@ art_cc_defaults {
shared_libs: [
"libbase",
"liblzma",
- "libnativehelper",
],
include_dirs: ["art/disassembler"],
header_libs: [
"art_cmdlineparser_headers", // For compiler_options.
+ "libnativehelper_header_only",
],
export_include_dirs: ["."],
@@ -407,7 +407,10 @@ art_cc_test {
},
},
- header_libs: ["libart_simulator_headers"],
+ header_libs: [
+ "libart_simulator_headers",
+ "libnativehelper_header_only",
+ ],
shared_libs: [
"libartd-compiler",
@@ -416,10 +419,13 @@ art_cc_test {
"libvixld-arm64",
"libbacktrace",
- "libnativehelper",
"libnativeloader",
],
+ include_dirs: [
+ "external/zlib",
+ ],
+
target: {
host: {
shared_libs: [
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 866a4d57a7..29ff235cea 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -68,7 +68,7 @@ class CFITest : public dwarf::DwarfTest {
: &Thread::DumpThreadOffset<PointerSize::k32>);
std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts));
std::stringstream stream;
- const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0);
+ const uint8_t* base = actual_asm.data() + (isa == InstructionSet::kThumb2 ? 1 : 0);
disasm->Dump(stream, base, base + actual_asm.size());
ReformatAsm(&stream, &lines);
// Print CFI and assembly interleaved.
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 111469fe89..fc6a717aa6 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -61,14 +61,14 @@ size_t CompiledCode::CodeDelta() const {
size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
switch (instruction_set) {
- case kArm:
- case kArm64:
- case kMips:
- case kMips64:
- case kX86:
- case kX86_64:
+ case InstructionSet::kArm:
+ case InstructionSet::kArm64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
return 0;
- case kThumb2: {
+ case InstructionSet::kThumb2: {
// +1 to set the low-order bit so a BLX will switch to Thumb mode
return 1;
}
@@ -80,14 +80,14 @@ size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) {
switch (instruction_set) {
- case kArm:
- case kArm64:
- case kMips:
- case kMips64:
- case kX86:
- case kX86_64:
+ case InstructionSet::kArm:
+ case InstructionSet::kArm64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
return code_pointer;
- case kThumb2: {
+ case InstructionSet::kThumb2: {
uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
// Set the low-order bit so a BLX will switch to Thumb mode
address |= 0x1;
diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h
index b30ff143d3..5405759c1f 100644
--- a/compiler/debug/dwarf/dwarf_test.h
+++ b/compiler/debug/dwarf/dwarf_test.h
@@ -60,7 +60,8 @@ class DwarfTest : public CommonRuntimeTest {
template<typename ElfTypes>
std::vector<std::string> Objdump(const char* args) {
// Write simple elf file with just the DWARF sections.
- InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86;
+ InstructionSet isa =
+ (sizeof(typename ElfTypes::Addr) == 8) ? InstructionSet::kX86_64 : InstructionSet::kX86;
ScratchFile file;
linker::FileOutputStream output_stream(file.GetFile());
linker::ElfBuilder<ElfTypes> builder(isa, nullptr, &output_stream);
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 6dacdfa48c..d0c98a7b79 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -37,8 +37,8 @@ static void WriteCIE(InstructionSet isa,
// debugger that its value in the previous frame is not recoverable.
bool is64bit = Is64BitInstructionSet(isa);
switch (isa) {
- case kArm:
- case kThumb2: {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2: {
dwarf::DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::ArmCore(13), 0); // R13(SP).
// core registers.
@@ -61,7 +61,7 @@ static void WriteCIE(InstructionSet isa,
WriteCIE(is64bit, return_reg, opcodes, format, buffer);
return;
}
- case kArm64: {
+ case InstructionSet::kArm64: {
dwarf::DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::Arm64Core(31), 0); // R31(SP).
// core registers.
@@ -84,8 +84,8 @@ static void WriteCIE(InstructionSet isa,
WriteCIE(is64bit, return_reg, opcodes, format, buffer);
return;
}
- case kMips:
- case kMips64: {
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64: {
dwarf::DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::MipsCore(29), 0); // R29(SP).
// core registers.
@@ -108,7 +108,7 @@ static void WriteCIE(InstructionSet isa,
WriteCIE(is64bit, return_reg, opcodes, format, buffer);
return;
}
- case kX86: {
+ case InstructionSet::kX86: {
// FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
constexpr bool generate_opcodes_for_x86_fp = false;
dwarf::DebugFrameOpCodeWriter<> opcodes;
@@ -134,7 +134,7 @@ static void WriteCIE(InstructionSet isa,
WriteCIE(is64bit, return_reg, opcodes, format, buffer);
return;
}
- case kX86_64: {
+ case InstructionSet::kX86_64: {
dwarf::DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::X86_64Core(4), 8); // R4(RSP).
opcodes.Offset(Reg::X86_64Core(16), -8); // R16(RIP).
@@ -160,7 +160,7 @@ static void WriteCIE(InstructionSet isa,
WriteCIE(is64bit, return_reg, opcodes, format, buffer);
return;
}
- case kNone:
+ case InstructionSet::kNone:
break;
}
LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 49d52c45c2..6e72b46174 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -68,19 +68,19 @@ class ElfDebugLineWriter {
int code_factor_bits_ = 0;
int dwarf_isa = -1;
switch (isa) {
- case kArm: // arm actually means thumb2.
- case kThumb2:
+ case InstructionSet::kArm: // arm actually means thumb2.
+ case InstructionSet::kThumb2:
code_factor_bits_ = 1; // 16-bit instuctions
dwarf_isa = 1; // DW_ISA_ARM_thumb.
break;
- case kArm64:
- case kMips:
- case kMips64:
+ case InstructionSet::kArm64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
code_factor_bits_ = 2; // 32-bit instructions
break;
- case kNone:
- case kX86:
- case kX86_64:
+ case InstructionSet::kNone:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
break;
}
std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size());
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index bf47e8f3d9..bb856b29f4 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -33,20 +33,20 @@ using Reg = dwarf::Reg;
static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return Reg::ArmCore(machine_reg);
- case kArm64:
+ case InstructionSet::kArm64:
return Reg::Arm64Core(machine_reg);
- case kX86:
+ case InstructionSet::kX86:
return Reg::X86Core(machine_reg);
- case kX86_64:
+ case InstructionSet::kX86_64:
return Reg::X86_64Core(machine_reg);
- case kMips:
+ case InstructionSet::kMips:
return Reg::MipsCore(machine_reg);
- case kMips64:
+ case InstructionSet::kMips64:
return Reg::Mips64Core(machine_reg);
- case kNone:
+ case InstructionSet::kNone:
LOG(FATAL) << "No instruction set";
}
UNREACHABLE();
@@ -54,20 +54,20 @@ static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) {
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return Reg::ArmFp(machine_reg);
- case kArm64:
+ case InstructionSet::kArm64:
return Reg::Arm64Fp(machine_reg);
- case kX86:
+ case InstructionSet::kX86:
return Reg::X86Fp(machine_reg);
- case kX86_64:
+ case InstructionSet::kX86_64:
return Reg::X86_64Fp(machine_reg);
- case kMips:
+ case InstructionSet::kMips:
return Reg::MipsFp(machine_reg);
- case kMips64:
+ case InstructionSet::kMips64:
return Reg::Mips64Fp(machine_reg);
- case kNone:
+ case InstructionSet::kNone:
LOG(FATAL) << "No instruction set";
}
UNREACHABLE();
@@ -230,7 +230,7 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info,
break; // the high word is correctly implied by the low word.
}
} else if (kind == Kind::kInFpuRegister) {
- if ((isa == kArm || isa == kThumb2) &&
+ if ((isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) &&
piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister &&
reg_hi.GetValue() == value + 1 && value % 2 == 0) {
// Translate S register pair to D register (e.g. S4+S5 to D2).
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index b37f984860..0907e102a0 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -89,7 +89,7 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
// instructions, so that disassembler tools can correctly disassemble.
// Note that even if we generate just a single mapping symbol, ARM's Streamline
// requires it to match function symbol. Just address 0 does not work.
- if (info.isa == kThumb2) {
+ if (info.isa == InstructionSet::kThumb2) {
if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) {
symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE);
mapping_symbol_address = address;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 7581962a86..a94dbe94ff 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -381,9 +381,9 @@ CompiledMethod* ArtCompileDEX(
quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8));
}
InstructionSet instruction_set = driver->GetInstructionSet();
- if (instruction_set == kThumb2) {
+ if (instruction_set == InstructionSet::kThumb2) {
// Don't use the thumb2 instruction set to avoid the one off code delta.
- instruction_set = kArm;
+ instruction_set = InstructionSet::kArm;
}
return CompiledMethod::SwapAllocCompiledMethod(
driver,
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 925863ef0e..518b0ece73 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -302,7 +302,8 @@ bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
uint16_t this_vreg = code_item->registers_size_ - code_item->ins_size_;
uint16_t zero_vreg_mask = 0u;
- for (const Instruction& instruction : code_item->Instructions()) {
+ for (const DexInstructionPcPair& pair : code_item->Instructions()) {
+ const Instruction& instruction = pair.Inst();
if (instruction.Opcode() == Instruction::RETURN_VOID) {
break;
} else if (instruction.Opcode() == Instruction::INVOKE_DIRECT) {
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 9c5b63232e..df75e07c3f 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -64,12 +64,11 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
if (method_verifier->HasFailures()) {
return;
}
- IterationRange<DexInstructionIterator> instructions = method_verifier->CodeItem()->Instructions();
- for (auto it = instructions.begin(); it != instructions.end(); ++it) {
- const Instruction& inst = *it;
+ for (const DexInstructionPcPair& pair : method_verifier->CodeItem()->Instructions()) {
+ const Instruction& inst = pair.Inst();
const Instruction::Code code = inst.Opcode();
if (code == Instruction::CHECK_CAST) {
- const uint32_t dex_pc = it.GetDexPC(instructions.begin());
+ const uint32_t dex_pc = pair.DexPc();
if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) {
// Do not attempt to quicken this instruction, it's unreachable anyway.
continue;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index e1ea6304eb..de481caf07 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -31,7 +31,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
CompilerDriver driver(&compiler_options,
&verification_results,
Compiler::kOptimizing,
- /* instruction_set_ */ kNone,
+ /* instruction_set_ */ InstructionSet::kNone,
/* instruction_set_features */ nullptr,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
@@ -91,7 +91,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
for (auto&& f : cfi_info) {
for (auto&& p : patches) {
compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
- &driver, kNone, c, 0u, 0u, 0u, s, v, f, p));
+ &driver, InstructionSet::kNone, c, 0u, 0u, 0u, s, v, f, p));
}
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7573367788..a9d27ef0cc 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -66,6 +66,7 @@
#include "nativehelper/ScopedLocalRef.h"
#include "object_lock.h"
#include "runtime.h"
+#include "runtime_intrinsics.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
@@ -289,13 +290,15 @@ CompilerDriver::CompilerDriver(
verification_results_(verification_results),
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
- instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
+ instruction_set_(
+ instruction_set == InstructionSet::kArm ? InstructionSet::kThumb2 : instruction_set),
instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
non_relative_linker_patch_count_(0u),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
+ number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
stats_(new AOTCompilationStats),
@@ -317,6 +320,8 @@ CompilerDriver::CompilerDriver(
if (GetCompilerOptions().IsBootImage()) {
CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image";
}
+
+ compiled_method_storage_.SetDedupeEnabled(compiler_options_->DeduplicateCode());
}
CompilerDriver::~CompilerDriver() {
@@ -364,28 +369,6 @@ std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickToInterpr
}
#undef CREATE_TRAMPOLINE
-static void SetupIntrinsic(Thread* self,
- Intrinsics intrinsic,
- InvokeType invoke_type,
- const char* class_name,
- const char* method_name,
- const char* signature)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- PointerSize image_size = class_linker->GetImagePointerSize();
- ObjPtr<mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
- if (cls == nullptr) {
- LOG(FATAL) << "Could not find class of intrinsic " << class_name;
- }
- ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size);
- if (method == nullptr || method->GetDeclaringClass() != cls) {
- LOG(FATAL) << "Could not find method of intrinsic "
- << class_name << " " << method_name << " " << signature;
- }
- DCHECK_EQ(method->GetInvokeType(), invoke_type);
- method->SetIntrinsic(static_cast<uint32_t>(intrinsic));
-}
-
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -404,14 +387,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
// We don't need to setup the intrinsics for non boot image compilation, as
// those compilations will pick up a boot image that have the ArtMethod already
// set with the intrinsics flag.
- ScopedObjectAccess soa(Thread::Current());
-#define SETUP_INTRINSICS(Name, InvokeType, NeedsEnvironmentOrCache, SideEffects, Exceptions, \
- ClassName, MethodName, Signature) \
- SetupIntrinsic(soa.Self(), Intrinsics::k##Name, InvokeType, ClassName, MethodName, Signature);
-#include "intrinsics_list.h"
- INTRINSICS_LIST(SETUP_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef SETUP_INTRINSICS
+ InitializeIntrinsics();
}
// Compile:
// 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex
@@ -478,13 +454,13 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
// GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler?
static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kMips:
- case kMips64:
- case kX86:
- case kX86_64: return true;
+ case InstructionSet::kArm:
+ case InstructionSet::kArm64:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64: return true;
default: return false;
}
}
@@ -763,13 +739,13 @@ static void ResolveConstStrings(Handle<mirror::DexCache> dex_cache,
}
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- for (const Instruction& inst : code_item->Instructions()) {
- switch (inst.Opcode()) {
+ for (const DexInstructionPcPair& inst : code_item->Instructions()) {
+ switch (inst->Opcode()) {
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO: {
- dex::StringIndex string_index((inst.Opcode() == Instruction::CONST_STRING)
- ? inst.VRegB_21c()
- : inst.VRegB_31c());
+ dex::StringIndex string_index((inst->Opcode() == Instruction::CONST_STRING)
+ ? inst->VRegB_21c()
+ : inst->VRegB_31c());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
break;
@@ -923,6 +899,12 @@ void CompilerDriver::PreCompile(jobject class_loader,
LOG(FATAL_WITHOUT_ABORT) << "Had a hard failure verifying all classes, and was asked to abort "
<< "in such situations. Please check the log.";
abort();
+ } else if (number_of_soft_verifier_failures_ > 0 &&
+ GetCompilerOptions().AbortOnSoftVerifierFailure()) {
+ LOG(FATAL_WITHOUT_ABORT) << "Had " << number_of_soft_verifier_failures_ << " soft failure(s) "
+ << "verifying all classes, and was asked to abort in such situations. "
+ << "Please check the log.";
+ abort();
}
if (compiler_options_->IsAnyCompilationEnabled()) {
@@ -2069,13 +2051,13 @@ class VerifyClassVisitor : public CompilationVisitor {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
manager_->GetCompiler()->SetHadHardVerifierFailure();
+ } else if (failure_kind == verifier::FailureKind::kSoftFailure) {
+ manager_->GetCompiler()->AddSoftVerifierFailure();
} else {
// Force a soft failure for the VerifierDeps. This is a sanity measure, as
// the vdex file already records that the class hasn't been resolved. It avoids
// trying to do future verification optimizations when processing the vdex file.
- DCHECK(failure_kind == verifier::FailureKind::kSoftFailure ||
- failure_kind == verifier::FailureKind::kNoFailure)
- << failure_kind;
+ DCHECK(failure_kind == verifier::FailureKind::kNoFailure) << failure_kind;
failure_kind = verifier::FailureKind::kSoftFailure;
}
} else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
@@ -2087,6 +2069,8 @@ class VerifyClassVisitor : public CompilationVisitor {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
manager_->GetCompiler()->SetHadHardVerifierFailure();
+ } else if (failure_kind == verifier::FailureKind::kSoftFailure) {
+ manager_->GetCompiler()->AddSoftVerifierFailure();
}
CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerified() || klass->IsErroneous())
@@ -2152,7 +2136,9 @@ void CompilerDriver::VerifyDexFile(jobject class_loader,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- verifier::HardFailLogMode log_level = GetCompilerOptions().AbortOnHardVerifierFailure()
+ bool abort_on_verifier_failures = GetCompilerOptions().AbortOnHardVerifierFailure()
+ || GetCompilerOptions().AbortOnSoftVerifierFailure();
+ verifier::HardFailLogMode log_level = abort_on_verifier_failures
? verifier::HardFailLogMode::kLogInternalFatal
: verifier::HardFailLogMode::kLogWarning;
VerifyClassVisitor visitor(&context, log_level);
@@ -2433,14 +2419,14 @@ class InitializeClassVisitor : public CompilationVisitor {
if (clinit != nullptr) {
const DexFile::CodeItem* code_item = clinit->GetCodeItem();
DCHECK(code_item != nullptr);
- for (const Instruction& inst : code_item->Instructions()) {
- if (inst.Opcode() == Instruction::CONST_STRING) {
+ for (const DexInstructionPcPair& inst : code_item->Instructions()) {
+ if (inst->Opcode() == Instruction::CONST_STRING) {
ObjPtr<mirror::String> s = class_linker->ResolveString(
- *dex_file, dex::StringIndex(inst.VRegB_21c()), h_dex_cache);
+ *dex_file, dex::StringIndex(inst->VRegB_21c()), h_dex_cache);
CHECK(s != nullptr);
- } else if (inst.Opcode() == Instruction::CONST_STRING_JUMBO) {
+ } else if (inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
ObjPtr<mirror::String> s = class_linker->ResolveString(
- *dex_file, dex::StringIndex(inst.VRegB_31c()), h_dex_cache);
+ *dex_file, dex::StringIndex(inst->VRegB_31c()), h_dex_cache);
CHECK(s != nullptr);
}
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f16e2ed7d3..da4a580bf2 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
#define ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
+#include <atomic>
#include <set>
#include <string>
#include <unordered_set>
@@ -352,6 +353,9 @@ class CompilerDriver {
void SetHadHardVerifierFailure() {
had_hard_verifier_failure_ = true;
}
+ void AddSoftVerifierFailure() {
+ number_of_soft_verifier_failures_++;
+ }
Compiler::Kind GetCompilerKind() {
return compiler_kind_;
@@ -519,6 +523,7 @@ class CompilerDriver {
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
+ std::atomic<uint32_t> number_of_soft_verifier_failures_;
bool had_hard_verifier_failure_;
// A thread pool that can (potentially) run tasks in parallel.
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index b6cedff28a..032763cdff 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -51,10 +51,12 @@ CompilerOptions::CompilerOptions()
compile_pic_(false),
verbose_methods_(),
abort_on_hard_verifier_failure_(false),
+ abort_on_soft_verifier_failure_(false),
init_failure_output_(nullptr),
dump_cfg_file_name_(""),
dump_cfg_append_(false),
force_determinism_(false),
+ deduplicate_code_(true),
register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
passes_to_run_(nullptr) {
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 311dbd569e..ab2a681468 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -226,6 +226,9 @@ class CompilerOptions FINAL {
bool AbortOnHardVerifierFailure() const {
return abort_on_hard_verifier_failure_;
}
+ bool AbortOnSoftVerifierFailure() const {
+ return abort_on_soft_verifier_failure_;
+ }
const std::vector<const DexFile*>* GetNoInlineFromDexFile() const {
return no_inline_from_;
@@ -251,6 +254,10 @@ class CompilerOptions FINAL {
return force_determinism_;
}
+ bool DeduplicateCode() const {
+ return deduplicate_code_;
+ }
+
RegisterAllocator::Strategy GetRegisterAllocationStrategy() const {
return register_allocation_strategy_;
}
@@ -303,6 +310,8 @@ class CompilerOptions FINAL {
// Abort compilation with an error if we find a class that fails verification with a hard
// failure.
bool abort_on_hard_verifier_failure_;
+ // Same for soft failures.
+ bool abort_on_soft_verifier_failure_;
// Log initialization of initialization failures to this stream if not null.
std::unique_ptr<std::ostream> init_failure_output_;
@@ -314,6 +323,9 @@ class CompilerOptions FINAL {
// outcomes.
bool force_determinism_;
+ // Whether code should be deduplicated.
+ bool deduplicate_code_;
+
RegisterAllocator::Strategy register_allocation_strategy_;
// If not null, specifies optimization passes which will be run instead of defaults.
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 9cb818a270..e28d49974a 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -60,6 +60,7 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
}
map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
+ map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
if (map.Exists(Base::DumpInitFailures)) {
if (!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) {
return false;
@@ -75,6 +76,7 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
}
}
map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_);
+ options->deduplicate_code_ = map.GetOrDefault(Base::DeduplicateCode);
return true;
}
@@ -122,6 +124,11 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.WithValues({true, false})
.IntoKey(Map::GenerateBuildID)
+ .Define({"--deduplicate-code=_"})
+ .template WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(Map::DeduplicateCode)
+
.Define("--debuggable")
.IntoKey(Map::Debuggable)
@@ -132,6 +139,9 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"})
.WithValues({true, false})
.IntoKey(Map::AbortOnHardVerifierFailure)
+ .Define({"--abort-on-soft-verifier-error", "--no-abort-on-soft-verifier-error"})
+ .WithValues({true, false})
+ .IntoKey(Map::AbortOnSoftVerifierFailure)
.Define("--dump-init-failures=_")
.template WithType<std::string>()
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 570bc5aca7..cccd6184c6 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -50,11 +50,13 @@ COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
COMPILER_OPTIONS_KEY (Unit, Debuggable)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
+COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
// TODO: Add type parser.
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
+COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
#undef COMPILER_OPTIONS_KEY
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index f759aa5ef8..897b50bdac 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -61,7 +61,8 @@ class ExceptionTest : public CommonRuntimeTest {
}
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stack_maps(&allocator, kRuntimeISA);
stack_maps.BeginStackMapEntry(/* dex_pc */ 3u,
/* native_pc_offset */ 3u,
@@ -97,7 +98,7 @@ class ExceptionTest : public CommonRuntimeTest {
static_cast<const void*>(fake_header_code_and_maps_.data() +
(fake_header_code_and_maps_.size() - code_size)));
- if (kRuntimeISA == kArm) {
+ if (kRuntimeISA == InstructionSet::kArm) {
// Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
CHECK_ALIGNED(stack_maps_offset, 2);
}
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 5b57718456..236b5c0c2e 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -102,13 +102,13 @@ class JNICFITest : public CFITest {
}
};
-#define TEST_ISA(isa) \
- TEST_F(JNICFITest, isa) { \
- std::vector<uint8_t> expected_asm(expected_asm_##isa, \
- expected_asm_##isa + arraysize(expected_asm_##isa)); \
- std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
- expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
- TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa) \
+ TEST_F(JNICFITest, isa) { \
+ std::vector<uint8_t> expected_asm(expected_asm_##isa, \
+ expected_asm_##isa + arraysize(expected_asm_##isa)); \
+ std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
+ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+ TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \
}
#ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 42a5f86117..55c27d1a6a 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -54,38 +54,38 @@ std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention
InstructionSet instruction_set) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) arm::ArmManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
+ case InstructionSet::kArm64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
+ case InstructionSet::kMips:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) mips::MipsManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
+ case InstructionSet::kMips64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
+ case InstructionSet::kX86:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) x86::X86ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
+ case InstructionSet::kX86_64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
@@ -156,38 +156,38 @@ std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocato
InstructionSet instruction_set) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return std::unique_ptr<JniCallingConvention>(
new (allocator) arm::ArmJniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
+ case InstructionSet::kArm64:
return std::unique_ptr<JniCallingConvention>(
new (allocator) arm64::Arm64JniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
+ case InstructionSet::kMips:
return std::unique_ptr<JniCallingConvention>(
new (allocator) mips::MipsJniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
+ case InstructionSet::kMips64:
return std::unique_ptr<JniCallingConvention>(
new (allocator) mips64::Mips64JniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
+ case InstructionSet::kX86:
return std::unique_ptr<JniCallingConvention>(
new (allocator) x86::X86JniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
+ case InstructionSet::kX86_64:
return std::unique_ptr<JniCallingConvention>(
new (allocator) x86_64::X86_64JniCallingConvention(
is_static, is_synchronized, is_critical_native, shorty));
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e32b681c5b..b3177aa471 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -323,7 +323,7 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static && !is_critical_native) {
const bool kReadBarrierFastPath =
- (instruction_set != kMips) && (instruction_set != kMips64);
+ (instruction_set != InstructionSet::kMips) && (instruction_set != InstructionSet::kMips64);
std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
if (kReadBarrierFastPath) {
skip_cold_path_label = __ CreateLabel();
@@ -531,7 +531,8 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
if (LIKELY(!is_critical_native)) {
// For normal JNI, store the return value on the stack because the call to
// JniMethodEnd will clobber the return value. It will be restored in (13).
- if ((instruction_set == kMips || instruction_set == kMips64) &&
+ if ((instruction_set == InstructionSet::kMips ||
+ instruction_set == InstructionSet::kMips64) &&
main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 3d5683335a..48747fc379 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -47,7 +47,7 @@ constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplace
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement;
Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
- : ArmBaseRelativePatcher(provider, kThumb2) {
+ : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) {
}
void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index fe76dfe39a..2c22a352c2 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -28,7 +28,7 @@ namespace linker {
class Thumb2RelativePatcherTest : public RelativePatcherTest {
public:
- Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }
+ Thumb2RelativePatcherTest() : RelativePatcherTest(InstructionSet::kThumb2, "default") { }
protected:
static const uint8_t kCallRawCode[];
@@ -173,7 +173,8 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
return false; // No thunk.
} else {
uint32_t thunk_end =
- CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+ CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader),
+ InstructionSet::kThumb2) +
MethodCallThunkSize();
uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
@@ -420,7 +421,8 @@ TEST_F(Thumb2RelativePatcherTest, CallTrampolineTooFar) {
// Check linked code.
uint32_t method3_offset = GetMethodOffset(3u);
- uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+ uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+ InstructionSet::kThumb2);
uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
ASSERT_EQ(diff & 1u, 0u);
ASSERT_LT(diff >> 1, 1u << 8); // Simple encoding, (diff >> 1) fits into 8 bits.
@@ -495,8 +497,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) {
ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
uint32_t thunk_size = MethodCallThunkSize();
- uint32_t thunk_offset =
- RoundDown(method3_header_offset - thunk_size, GetInstructionSetAlignment(kThumb2));
+ uint32_t thunk_offset = RoundDown(method3_header_offset - thunk_size, kArmAlignment);
DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
method3_header_offset);
ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
@@ -527,7 +528,8 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) {
// Check linked code.
uint32_t method3_offset = GetMethodOffset(3u);
- uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+ uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+ InstructionSet::kThumb2);
uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
ASSERT_EQ(diff & 1u, 0u);
ASSERT_LT(diff >> 1, 1u << 8); // Simple encoding, (diff >> 1) fits into 8 bits.
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 663e43b4ec..52a07965b9 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -76,7 +76,8 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
if (num_adrp == 0u) {
return 0u;
}
- uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size;
+ uint32_t alignment_bytes =
+ CompiledMethod::AlignCode(code_size, InstructionSet::kArm64) - code_size;
return kAdrpThunkSize * num_adrp + alignment_bytes;
}
@@ -84,7 +85,7 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
const Arm64InstructionSetFeatures* features)
- : ArmBaseRelativePatcher(provider, kArm64),
+ : ArmBaseRelativePatcher(provider, InstructionSet::kArm64),
fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
reserved_adrp_thunks_(0u),
processed_adrp_thunks_(0u) {
@@ -105,7 +106,8 @@ uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
// Add thunks for previous method if any.
if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
- offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+ offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+ kAdrpThunkSize * num_adrp_thunks;
reserved_adrp_thunks_ = adrp_thunk_locations_.size();
}
@@ -149,7 +151,8 @@ uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
// Add thunks for the last method if any.
if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
- offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+ offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+ kAdrpThunkSize * num_adrp_thunks;
reserved_adrp_thunks_ = adrp_thunk_locations_.size();
}
}
@@ -159,7 +162,7 @@ uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
if (fix_cortex_a53_843419_) {
if (!current_method_thunks_.empty()) {
- uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
+ uint32_t aligned_offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64);
if (kIsDebugBuild) {
CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 8a5b4cc8e5..05459a2a82 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -29,7 +29,7 @@ namespace linker {
class Arm64RelativePatcherTest : public RelativePatcherTest {
public:
explicit Arm64RelativePatcherTest(const std::string& variant)
- : RelativePatcherTest(kArm64, variant) { }
+ : RelativePatcherTest(InstructionSet::kArm64, variant) { }
protected:
static const uint8_t kCallRawCode[];
@@ -153,7 +153,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
// There may be a thunk before method2.
if (last_result.second != last_method_offset) {
// Thunk present. Check that there's only one.
- uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize();
+ uint32_t thunk_end =
+ CompiledCode::AlignCode(gap_end, InstructionSet::kArm64) + MethodCallThunkSize();
uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
}
@@ -347,7 +348,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
CHECK_EQ(compiled_method_refs_[0].index, 1u);
CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
- uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
+ uint32_t thunk_offset =
+ CompiledCode::AlignCode(method1_offset + method1_size, InstructionSet::kArm64);
uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
CHECK_ALIGNED(b_diff, 4u);
ASSERT_LT(b_diff, 128 * MB);
@@ -602,7 +604,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) {
// Check linked code.
uint32_t thunk_offset =
- CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+ CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
@@ -688,8 +690,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
uint32_t thunk_size = MethodCallThunkSize();
- uint32_t thunk_offset =
- RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64));
+ uint32_t thunk_offset = RoundDown(last_method_header_offset - thunk_size, kArm64Alignment);
DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
last_method_header_offset);
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
@@ -721,7 +722,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
// Check linked code.
uint32_t thunk_offset =
- CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+ CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 79412370bc..b30b55e9b4 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -417,10 +417,10 @@ class ElfBuilder FINAL {
InstructionSet isa,
const InstructionSetFeatures* features)
: Section(owner, name, type, flags, link, info, align, entsize) {
- if (isa == kMips || isa == kMips64) {
+ if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) {
bool fpu32 = false; // assume mips64 values
uint8_t isa_rev = 6; // assume mips64 values
- if (isa == kMips) {
+ if (isa == InstructionSet::kMips) {
// adjust for mips32 values
fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
isa_rev = features->AsMipsInstructionSetFeatures()->IsR6()
@@ -430,14 +430,15 @@ class ElfBuilder FINAL {
: 1;
}
abiflags_.version = 0; // version of flags structure
- abiflags_.isa_level = (isa == kMips) ? 32 : 64;
+ abiflags_.isa_level = (isa == InstructionSet::kMips) ? 32 : 64;
abiflags_.isa_rev = isa_rev;
- abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+ abiflags_.gpr_size = (isa == InstructionSet::kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
abiflags_.cpr2_size = MIPS_AFL_REG_NONE;
// Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6).
// Otherwise set to MIPS_ABI_FP_DOUBLE.
- abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
+ abiflags_.fp_abi =
+ (isa == InstructionSet::kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
abiflags_.isa_ext = 0;
abiflags_.ases = 0;
// To keep the code simple, we are not using odd FP reg for single floats for both
@@ -689,7 +690,7 @@ class ElfBuilder FINAL {
Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize);
Elf_Word abiflags_size = 0;
- if (isa_ == kMips || isa_ == kMips64) {
+ if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) {
abiflags_size = abiflags_.GetSize();
}
Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize);
@@ -835,29 +836,29 @@ class ElfBuilder FINAL {
static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) {
Elf_Ehdr elf_header = Elf_Ehdr();
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall through.
- case kThumb2: {
+ case InstructionSet::kThumb2: {
elf_header.e_machine = EM_ARM;
elf_header.e_flags = EF_ARM_EABI_VER5;
break;
}
- case kArm64: {
+ case InstructionSet::kArm64: {
elf_header.e_machine = EM_AARCH64;
elf_header.e_flags = 0;
break;
}
- case kX86: {
+ case InstructionSet::kX86: {
elf_header.e_machine = EM_386;
elf_header.e_flags = 0;
break;
}
- case kX86_64: {
+ case InstructionSet::kX86_64: {
elf_header.e_machine = EM_X86_64;
elf_header.e_flags = 0;
break;
}
- case kMips: {
+ case InstructionSet::kMips: {
elf_header.e_machine = EM_MIPS;
elf_header.e_flags = (EF_MIPS_NOREORDER |
EF_MIPS_PIC |
@@ -868,7 +869,7 @@ class ElfBuilder FINAL {
: EF_MIPS_ARCH_32R2));
break;
}
- case kMips64: {
+ case InstructionSet::kMips64: {
elf_header.e_machine = EM_MIPS;
elf_header.e_flags = (EF_MIPS_NOREORDER |
EF_MIPS_PIC |
@@ -876,7 +877,7 @@ class ElfBuilder FINAL {
EF_MIPS_ARCH_64R6);
break;
}
- case kNone: {
+ case InstructionSet::kNone: {
LOG(FATAL) << "No instruction set";
break;
}
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 586e2aa8b2..629fdd535d 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -22,7 +22,7 @@ namespace linker {
class Mips32r6RelativePatcherTest : public RelativePatcherTest {
public:
- Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
+ Mips32r6RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r6") {}
protected:
static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index ebe5406512..d876c76daa 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -23,7 +23,7 @@ namespace linker {
class MipsRelativePatcherTest : public RelativePatcherTest {
public:
- MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
+ MipsRelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r2") {}
protected:
static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc
index 4edcae72f6..a02f5005e8 100644
--- a/compiler/linker/mips64/relative_patcher_mips64_test.cc
+++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc
@@ -23,7 +23,7 @@ namespace linker {
class Mips64RelativePatcherTest : public RelativePatcherTest {
public:
- Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {}
+ Mips64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips64, "default") {}
protected:
static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index dc15bb087e..13877f8f12 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -95,31 +95,31 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
UNUSED(provider);
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
+ case InstructionSet::kX86:
return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
+ case InstructionSet::kX86_64:
return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
#endif
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
+ case InstructionSet::kArm:
// Fall through: we generate Thumb2 code for "arm".
- case kThumb2:
+ case InstructionSet::kThumb2:
return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
+ case InstructionSet::kArm64:
return std::unique_ptr<RelativePatcher>(
new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
+ case InstructionSet::kMips:
return std::unique_ptr<RelativePatcher>(
new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
+ case InstructionSet::kMips64:
return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
#endif
default:
diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc
index 4f74cee384..b855dec91d 100644
--- a/compiler/linker/x86/relative_patcher_x86_test.cc
+++ b/compiler/linker/x86/relative_patcher_x86_test.cc
@@ -23,7 +23,7 @@ namespace linker {
class X86RelativePatcherTest : public RelativePatcherTest {
public:
- X86RelativePatcherTest() : RelativePatcherTest(kX86, "default") { }
+ X86RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86, "default") { }
protected:
static const uint8_t kCallRawCode[];
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
index ae17aa7a5f..6baa92de36 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
+++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
@@ -23,7 +23,7 @@ namespace linker {
class X86_64RelativePatcherTest : public RelativePatcherTest {
public:
- X86_64RelativePatcherTest() : RelativePatcherTest(kX86_64, "default") { }
+ X86_64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86_64, "default") { }
protected:
static const uint8_t kCallRawCode[];
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 84f01828b2..015a6a04d3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -42,6 +42,7 @@
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
+#include "base/casts.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "compiled_method.h"
@@ -59,6 +60,7 @@
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
+#include "stack_map_stream.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"
@@ -141,6 +143,158 @@ static bool CheckTypeConsistency(HInstruction* instruction) {
return true;
}
+class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
+ InstructionSet instruction_set) {
+ ScopedArenaAllocator allocator(arena_stack);
+ void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
+ return std::unique_ptr<CodeGenerationData>(
+ ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
+ }
+
+ ScopedArenaAllocator* GetScopedAllocator() {
+ return &allocator_;
+ }
+
+ void AddSlowPath(SlowPathCode* slow_path) {
+ slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
+ }
+
+ ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
+ return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
+ }
+
+ StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
+
+ void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
+ jit_string_roots_.Overwrite(string_reference,
+ reinterpret_cast64<uint64_t>(string.GetReference()));
+ }
+
+ uint64_t GetJitStringRootIndex(StringReference string_reference) const {
+ return jit_string_roots_.Get(string_reference);
+ }
+
+ size_t GetNumberOfJitStringRoots() const {
+ return jit_string_roots_.size();
+ }
+
+ void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+ jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
+ }
+
+ uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
+ return jit_class_roots_.Get(type_reference);
+ }
+
+ size_t GetNumberOfJitClassRoots() const {
+ return jit_class_roots_.size();
+ }
+
+ size_t GetNumberOfJitRoots() const {
+ return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
+ }
+
+ void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
+ : allocator_(std::move(allocator)),
+ stack_map_stream_(&allocator_, instruction_set),
+ slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
+ jit_string_roots_(StringReferenceValueComparator(),
+ allocator_.Adapter(kArenaAllocCodeGenerator)),
+ jit_class_roots_(TypeReferenceValueComparator(),
+ allocator_.Adapter(kArenaAllocCodeGenerator)) {
+ slow_paths_.reserve(kDefaultSlowPathsCapacity);
+ }
+
+ static constexpr size_t kDefaultSlowPathsCapacity = 8;
+
+ ScopedArenaAllocator allocator_;
+ StackMapStream stack_map_stream_;
+ ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
+
+ // Maps a StringReference (dex_file, string_index) to the index in the literal table.
+ // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
+
+ // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
+ // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
+};
+
+void CodeGenerator::CodeGenerationData::EmitJitRoots(
+ Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ size_t index = 0;
+ for (auto& entry : jit_string_roots_) {
+ // Update the `roots` with the string, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
+ // Ensure the string is strongly interned. This is a requirement on how the JIT
+ // handles strings. b/32995596
+ class_linker->GetInternTable()->InternStrong(
+ reinterpret_cast<mirror::String*>(roots->Get(index)));
+ ++index;
+ }
+ for (auto& entry : jit_class_roots_) {
+ // Update the `roots` with the class, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
+ ++index;
+ }
+}
+
+ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetScopedAllocator();
+}
+
+StackMapStream* CodeGenerator::GetStackMapStream() {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetStackMapStream();
+}
+
+void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
+ Handle<mirror::String> string) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->ReserveJitStringRoot(string_reference, string);
+}
+
+uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetJitStringRootIndex(string_reference);
+}
+
+void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->ReserveJitClassRoot(type_reference, klass);
+}
+
+uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetJitClassRootIndex(type_reference);
+}
+
+void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
+ const uint8_t* roots_data ATTRIBUTE_UNUSED) {
+ DCHECK(code_generation_data_ != nullptr);
+ DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
+ DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
+}
+
size_t CodeGenerator::GetCacheOffset(uint32_t index) {
return sizeof(GcRoot<mirror::Object>) * index;
}
@@ -210,9 +364,10 @@ class DisassemblyScope {
void CodeGenerator::GenerateSlowPaths() {
+ DCHECK(code_generation_data_ != nullptr);
size_t code_start = 0;
- for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
- SlowPathCode* slow_path = slow_path_unique_ptr.get();
+ for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
+ SlowPathCode* slow_path = slow_path_ptr.get();
current_slow_path_ = slow_path;
if (disasm_info_ != nullptr) {
code_start = GetAssembler()->CodeSize();
@@ -227,7 +382,14 @@ void CodeGenerator::GenerateSlowPaths() {
current_slow_path_ = nullptr;
}
+void CodeGenerator::InitializeCodeGenerationData() {
+ DCHECK(code_generation_data_ == nullptr);
+ code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
+}
+
void CodeGenerator::Compile(CodeAllocator* allocator) {
+ InitializeCodeGenerationData();
+
// The register allocator already called `InitializeCodeGeneration`,
// where the frame size has been computed.
DCHECK(block_order_ != nullptr);
@@ -374,6 +536,7 @@ void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
break;
case kVirtual:
case kInterface:
+ case kPolymorphic:
LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
UNREACHABLE();
}
@@ -401,6 +564,9 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
case kInterface:
entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
break;
+ case kPolymorphic:
+ LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
+ UNREACHABLE();
}
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
@@ -620,43 +786,43 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2: {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm::CodeGeneratorARMVIXL(
graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm64::CodeGeneratorARM64(
graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips::CodeGeneratorMIPS(
graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips64::CodeGeneratorMIPS64(
graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86::CodeGeneratorX86(
graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86_64::CodeGeneratorX86_64(
graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
@@ -667,12 +833,54 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
}
}
+CodeGenerator::CodeGenerator(HGraph* graph,
+ size_t number_of_core_registers,
+ size_t number_of_fpu_registers,
+ size_t number_of_register_pairs,
+ uint32_t core_callee_save_mask,
+ uint32_t fpu_callee_save_mask,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : frame_size_(0),
+ core_spill_mask_(0),
+ fpu_spill_mask_(0),
+ first_register_slot_in_slow_path_(0),
+ allocated_registers_(RegisterSet::Empty()),
+ blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
+ number_of_core_registers_(number_of_core_registers),
+ number_of_fpu_registers_(number_of_fpu_registers),
+ number_of_register_pairs_(number_of_register_pairs),
+ core_callee_save_mask_(core_callee_save_mask),
+ fpu_callee_save_mask_(fpu_callee_save_mask),
+ block_order_(nullptr),
+ disasm_info_(nullptr),
+ stats_(stats),
+ graph_(graph),
+ compiler_options_(compiler_options),
+ current_slow_path_(nullptr),
+ current_block_index_(0),
+ is_leaf_(true),
+ requires_current_method_(false),
+ code_generation_data_() {
+}
+
+CodeGenerator::~CodeGenerator() {}
+
void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
size_t* method_info_size) {
DCHECK(stack_map_size != nullptr);
DCHECK(method_info_size != nullptr);
- *stack_map_size = stack_map_stream_.PrepareForFillIn();
- *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ *stack_map_size = stack_map_stream->PrepareForFillIn();
+ *method_info_size = stack_map_stream->ComputeMethodInfoSize();
+}
+
+size_t CodeGenerator::GetNumberOfJitRoots() const {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetNumberOfJitRoots();
}
static void CheckCovers(uint32_t dex_pc,
@@ -711,10 +919,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
ArenaVector<size_t> covered(
loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
- IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
- for (auto it = instructions.begin(); it != instructions.end(); ++it) {
- const uint32_t dex_pc = it.GetDexPC(instructions.begin());
- const Instruction& instruction = *it;
+ for (const DexInstructionPcPair& pair : code_item.Instructions()) {
+ const uint32_t dex_pc = pair.DexPc();
+ const Instruction& instruction = pair.Inst();
if (instruction.IsBranch()) {
uint32_t target = dex_pc + instruction.GetTargetOffset();
CheckCovers(target, graph, code_info, loop_headers, &covered);
@@ -740,8 +947,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
MemoryRegion method_info_region,
const DexFile::CodeItem& code_item) {
- stack_map_stream_.FillInCodeInfo(stack_map_region);
- stack_map_stream_.FillInMethodInfo(method_info_region);
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ stack_map_stream->FillInCodeInfo(stack_map_region);
+ stack_map_stream->FillInMethodInfo(method_info_region);
if (kIsDebugBuild) {
CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
}
@@ -791,11 +999,12 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// Collect PC infos for the mapping table.
uint32_t native_pc = GetAssembler()->CodePosition();
+ StackMapStream* stack_map_stream = GetStackMapStream();
if (instruction == nullptr) {
// For stack overflow checks and native-debug-info entries without dex register
// mapping (i.e. start of basic block or start of slow path).
- stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
+ stack_map_stream->EndStackMapEntry();
return;
}
LocationSummary* locations = instruction->GetLocations();
@@ -814,7 +1023,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
- stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
+ stack_map_stream->BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
locations->GetStackMask(),
@@ -830,10 +1039,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
instruction->IsInvoke() &&
instruction->IsInvokeStaticOrDirect()) {
HInvoke* const invoke = instruction->AsInvoke();
- stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
+ stack_map_stream->AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
}
}
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
if (instruction->IsSuspendCheck() &&
@@ -844,10 +1053,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// We duplicate the stack map as a marker that this stack map can be an OSR entry.
// Duplicating it avoids having the runtime recognize and skip an OSR stack map.
DCHECK(info->IsIrreducible());
- stack_map_stream_.BeginStackMapEntry(
+ stack_map_stream->BeginStackMapEntry(
dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
EmitEnvironment(instruction->GetEnvironment(), slow_path);
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
if (kIsDebugBuild) {
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -867,21 +1076,22 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
} else if (kIsDebugBuild) {
// Ensure stack maps are unique, by checking that the native pc in the stack map
// last emitted is different than the native pc of the stack map just emitted.
- size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+ size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
- stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
+ DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
+ stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
}
}
}
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
- size_t count = stack_map_stream_.GetNumberOfStackMaps();
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ size_t count = stack_map_stream->GetNumberOfStackMaps();
if (count == 0) {
return false;
}
- CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
+ CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset;
return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}
@@ -899,6 +1109,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
void CodeGenerator::RecordCatchBlockInfo() {
ArenaAllocator* allocator = graph_->GetAllocator();
+ StackMapStream* stack_map_stream = GetStackMapStream();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -915,7 +1126,7 @@ void CodeGenerator::RecordCatchBlockInfo() {
ArenaBitVector* stack_mask =
ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
- stack_map_stream_.BeginStackMapEntry(dex_pc,
+ stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
register_mask,
stack_mask,
@@ -933,19 +1144,19 @@ void CodeGenerator::RecordCatchBlockInfo() {
}
if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
} else {
Location location = current_phi->GetLocations()->Out();
switch (location.GetKind()) {
case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
++vreg;
DCHECK_LT(vreg, num_vregs);
@@ -960,17 +1171,23 @@ void CodeGenerator::RecordCatchBlockInfo() {
}
}
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
}
}
+void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->AddSlowPath(slow_path);
+}
+
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
if (environment == nullptr) return;
+ StackMapStream* stack_map_stream = GetStackMapStream();
if (environment->GetParent() != nullptr) {
// We emit the parent environment first.
EmitEnvironment(environment->GetParent(), slow_path);
- stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
+ stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
environment->GetDexPc(),
environment->Size(),
&graph_->GetDexFile());
@@ -980,7 +1197,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* current = environment->GetInstructionAt(i);
if (current == nullptr) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
continue;
}
@@ -990,43 +1207,43 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
} else if (current->IsNullConstant()) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant()) << current->DebugName();
int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
}
break;
}
case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
++i;
DCHECK_LT(i, environment_size);
@@ -1037,17 +1254,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -1059,17 +1276,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
@@ -1083,16 +1300,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
}
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
++i;
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
++i;
}
DCHECK_LT(i, environment_size);
@@ -1104,15 +1321,15 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
}
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
}
++i;
DCHECK_LT(i, environment_size);
@@ -1120,7 +1337,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
case Location::kInvalid: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
break;
}
@@ -1130,7 +1347,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
if (environment->GetParent() != nullptr) {
- stack_map_stream_.EndInlineInfoEntry();
+ stack_map_stream->EndInlineInfoEntry();
}
}
@@ -1408,31 +1625,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
const uint8_t* roots_data) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t index = 0;
- for (auto& entry : jit_string_roots_) {
- // Update the `roots` with the string, and replace the address temporarily
- // stored to the index in the table.
- uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
- entry.second = index;
- // Ensure the string is strongly interned. This is a requirement on how the JIT
- // handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(
- reinterpret_cast<mirror::String*>(roots->Get(index)));
- ++index;
- }
- for (auto& entry : jit_class_roots_) {
- // Update the `roots` with the class, and replace the address temporarily
- // stored to the index in the table.
- uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
- entry.second = index;
- ++index;
- }
+ code_generation_data_->EmitJitRoots(roots);
EmitJitRootPatches(code, roots_data);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2904b71991..18ad60db87 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,7 +32,7 @@
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
#include "stack.h"
-#include "stack_map_stream.h"
+#include "stack_map.h"
#include "string_reference.h"
#include "type_reference.h"
#include "utils/label.h"
@@ -61,6 +61,7 @@ class Assembler;
class CodeGenerator;
class CompilerDriver;
class CompilerOptions;
+class StackMapStream;
class ParallelMoveResolver;
namespace linker {
@@ -190,7 +191,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
- virtual ~CodeGenerator() {}
+ virtual ~CodeGenerator();
// Get the graph. This is the outermost graph, never the graph of a method being inlined.
HGraph* GetGraph() const { return graph_; }
@@ -338,18 +339,16 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// TODO: Replace with a catch-entering instruction that records the environment.
void RecordCatchBlockInfo();
- // TODO: Avoid creating the `std::unique_ptr` here.
- void AddSlowPath(SlowPathCode* slow_path) {
- slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
- }
+ // Get the ScopedArenaAllocator used for codegen memory allocation.
+ ScopedArenaAllocator* GetScopedAllocator();
+
+ void AddSlowPath(SlowPathCode* slow_path);
void BuildStackMaps(MemoryRegion stack_map_region,
MemoryRegion method_info_region,
const DexFile::CodeItem& code_item);
void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
- size_t GetNumberOfJitRoots() const {
- return jit_string_roots_.size() + jit_class_roots_.size();
- }
+ size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
// Also emits literal patches.
@@ -600,38 +599,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t core_callee_save_mask,
uint32_t fpu_callee_save_mask,
const CompilerOptions& compiler_options,
- OptimizingCompilerStats* stats)
- : frame_size_(0),
- core_spill_mask_(0),
- fpu_spill_mask_(0),
- first_register_slot_in_slow_path_(0),
- allocated_registers_(RegisterSet::Empty()),
- blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
- kArenaAllocCodeGenerator)),
- blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
- kArenaAllocCodeGenerator)),
- number_of_core_registers_(number_of_core_registers),
- number_of_fpu_registers_(number_of_fpu_registers),
- number_of_register_pairs_(number_of_register_pairs),
- core_callee_save_mask_(core_callee_save_mask),
- fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
- block_order_(nullptr),
- jit_string_roots_(StringReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_roots_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- disasm_info_(nullptr),
- stats_(stats),
- graph_(graph),
- compiler_options_(compiler_options),
- slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- current_slow_path_(nullptr),
- current_block_index_(0),
- is_leaf_(true),
- requires_current_method_(false) {
- slow_paths_.reserve(8);
- }
+ OptimizingCompilerStats* stats);
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -658,7 +626,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool CallPushesPC() const {
InstructionSet instruction_set = GetInstructionSet();
- return instruction_set == kX86 || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64;
}
// Arm64 has its own type for a label, so we need to templatize these methods
@@ -687,12 +655,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
return current_slow_path_;
}
+ StackMapStream* GetStackMapStream();
+
+ void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string);
+ uint64_t GetJitStringRootIndex(StringReference string_reference);
+ void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass);
+ uint64_t GetJitClassRootIndex(TypeReference type_reference);
+
// Emit the patches assocatied with JIT roots. Only applies to JIT compiled code.
- virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
- const uint8_t* roots_data ATTRIBUTE_UNUSED) {
- DCHECK_EQ(jit_string_roots_.size(), 0u);
- DCHECK_EQ(jit_class_roots_.size(), 0u);
- }
+ virtual void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data);
// Frame size required for this method.
uint32_t frame_size_;
@@ -714,24 +685,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const uint32_t core_callee_save_mask_;
const uint32_t fpu_callee_save_mask_;
- StackMapStream stack_map_stream_;
-
// The order to use for code generation.
const ArenaVector<HBasicBlock*>* block_order_;
- // Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
- // will compute all the indices.
- ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
-
- // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
- // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
- // will compute all the indices.
- ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
-
DisassemblyInformation* disasm_info_;
private:
+ class CodeGenerationData;
+
+ void InitializeCodeGenerationData();
size_t GetStackOffsetOfSavedRegister(size_t index);
void GenerateSlowPaths();
void BlockIfInRegister(Location location, bool is_out = false) const;
@@ -742,8 +704,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
-
// The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
@@ -759,6 +719,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// needs the environment including a valid stack frame.
bool requires_current_method_;
+ // The CodeGenerationData contains a ScopedArenaAllocator intended for reusing the
+ // ArenaStack memory allocated in previous passes instead of adding to the memory
+ // held by the ArenaAllocator. This ScopedArenaAllocator is created in
+ // CodeGenerator::Compile() and remains alive until the CodeGenerator is destroyed.
+ std::unique_ptr<CodeGenerationData> code_generation_data_;
+
friend class OptimizingCFITest;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
@@ -863,7 +829,8 @@ class SlowPathGenerator {
{{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
}
// Cannot share: create and add new slow-path for this particular dex-pc.
- SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
+ SlowPathCodeType* slow_path =
+ new (codegen_->GetScopedAllocator()) SlowPathCodeType(instruction);
iter->second.emplace_back(std::make_pair(instruction, slow_path));
codegen_->AddSlowPath(slow_path);
return slow_path;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 64f2e46895..a0cb43ee01 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1557,12 +1557,13 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
- __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64)));
{
// Ensure that between load and RecordPcInfo there are no pools emitted.
ExactAssemblyScope eas(GetVIXLAssembler(),
@@ -2205,7 +2206,8 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
SuspendCheckSlowPathARM64* slow_path =
down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -3013,7 +3015,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl::aarch64::Label non_zero;
@@ -3128,7 +3130,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
BoundsCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
@@ -3145,7 +3147,7 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3502,7 +3504,7 @@ void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -4057,8 +4059,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -4089,8 +4091,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -4178,8 +4180,8 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARM64* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl::aarch64::Label done;
@@ -4687,8 +4689,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4696,8 +4697,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -5012,7 +5012,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5152,7 +5152,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5393,8 +5393,7 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
+ SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) NullCheckSlowPathARM64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -6036,7 +6035,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6295,7 +6294,7 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
instruction,
ref,
obj,
@@ -6353,7 +6352,7 @@ void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction*
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
instruction,
ref,
obj,
@@ -6480,7 +6479,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeARM64* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6516,7 +6515,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -6562,17 +6561,13 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c7e2640590..a8f7e8600a 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2568,7 +2568,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (!skip_overflow_check) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
+ __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::kMaxInstructionSizeInBytes,
@@ -4733,7 +4733,7 @@ void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5303,7 +5303,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
vixl32::Label less, greater, done;
vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
DataType::Type type = compare->InputAt(0)->GetType();
- vixl32::Condition less_cond = vixl32::Condition(kNone);
+ vixl32::Condition less_cond = vixl32::Condition::None();
switch (type) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -5960,7 +5960,7 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
NullCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
+ new (GetScopedAllocator()) NullCheckSlowPathARMVIXL(instruction);
AddSlowPath(slow_path);
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
}
@@ -6433,7 +6433,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl32::Label non_zero;
@@ -6694,7 +6694,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
int32_t index = Int32ConstantFrom(index_loc);
if (index < 0 || index >= length) {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
} else {
@@ -6705,13 +6705,13 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
}
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(index_loc), length);
codegen_->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
} else {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
codegen_->AddSlowPath(slow_path);
__ B(ls, slow_path->GetEntryLabel());
@@ -6778,7 +6778,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru
down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -7215,8 +7215,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ LoadClassSlowPathARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -7242,10 +7243,10 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7355,7 +7356,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7682,8 +7683,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7711,8 +7712,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7790,8 +7791,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARMVIXL* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
@@ -8452,7 +8453,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8701,7 +8702,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
AddSlowPath(slow_path);
@@ -8747,8 +8748,8 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
- LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
instruction,
ref,
obj,
@@ -8859,7 +8860,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -8895,7 +8896,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -9109,8 +9110,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
@@ -9121,8 +9121,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
@@ -9402,17 +9401,13 @@ void CodeGeneratorARMVIXL::EmitJitRootPatches(uint8_t* code, const uint8_t* root
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
VIXLUInt32Literal* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
VIXLUInt32Literal* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 85177115eb..f9f5a4da56 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -33,6 +33,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
+#include "stack_map_stream.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
@@ -460,6 +461,10 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
// If not null, the block to branch to after the suspend check.
HBasicBlock* const successor_;
@@ -1128,12 +1133,13 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
__ FinalizeCode();
// Adjust native pc offsets in stack maps.
- for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
- stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ stack_map_stream->SetStackMapNativePcOffset(i, new_position);
}
// Adjust pc offsets for the disassembly information.
@@ -1298,7 +1304,7 @@ void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot
// automatically unspilled when the scratch scope object is destroyed).
ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
// If V0 spills onto the stack, SP-relative offsets need to be adjusted.
- int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ int stack_offset = ensure_scratch.IsSpilled() ? kStackAlignment : 0;
for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
__ LoadFromOffset(kLoadWord,
Register(ensure_scratch.GetRegister()),
@@ -1345,13 +1351,14 @@ static dwarf::Reg DWARFReg(Register reg) {
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
if (do_overflow_check) {
__ LoadFromOffset(kLoadWord,
ZERO,
SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
RecordPcInfo(nullptr, 0);
}
@@ -1363,8 +1370,9 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
}
// Spill callee-saved registers.
@@ -1788,21 +1796,19 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
return &jit_string_patches_.back();
}
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
return &jit_class_patches_.back();
}
@@ -1834,17 +1840,13 @@ void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const JitPatchInfo& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(StringReference(&info.target_dex_file,
- dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const JitPatchInfo& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
- dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
@@ -1998,8 +2000,19 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathMIPS*>(instruction->GetSlowPath());
+
+ if (slow_path == nullptr) {
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
@@ -2986,7 +2999,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
MipsLabel non_zero;
@@ -3171,7 +3184,7 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -3263,8 +3276,8 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -3427,7 +3440,7 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3884,7 +3897,7 @@ void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
DataType::Type type = instruction->GetType();
@@ -6248,8 +6261,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == DataType::Type::kFloat64) {
@@ -6402,8 +6418,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check.
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
if (type == DataType::Type::kFloat64) {
// Pass FP parameters in core registers.
if (value_location.IsFpuRegister()) {
@@ -6693,7 +6712,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
instruction,
root,
/*entrypoint*/ temp);
@@ -7020,14 +7039,14 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetAllocator())
+ slow_path = new (GetScopedAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
}
AddSlowPath(slow_path);
@@ -7063,7 +7082,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7099,7 +7118,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -7269,8 +7288,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7298,8 +7317,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7844,7 +7863,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -8011,7 +8030,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -8338,7 +8357,7 @@ void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -9241,6 +9260,16 @@ void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instructio
}
}
+void LocationsBuilderMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5f2f90004d..7845e312cb 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -662,10 +662,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
const JitPatchInfo& info,
uint64_t index_in_table) const;
JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
private:
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 51601a147e..0a6d9159d1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -31,6 +31,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
+#include "stack_map_stream.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips64/assembler_mips64.h"
@@ -414,6 +415,10 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
// If not null, the block to branch to after the suspend check.
HBasicBlock* const successor_;
@@ -1072,12 +1077,13 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
__ FinalizeCode();
// Adjust native pc offsets in stack maps.
- for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
- stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ stack_map_stream->SetStackMapNativePcOffset(i, new_position);
}
// Adjust pc offsets for the disassembly information.
@@ -1159,13 +1165,15 @@ static dwarf::Reg DWARFReg(FpuRegister reg) {
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
if (do_overflow_check) {
- __ LoadFromOffset(kLoadWord,
- ZERO,
- SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+ __ LoadFromOffset(
+ kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
RecordPcInfo(nullptr, 0);
}
@@ -1174,8 +1182,9 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
}
// Spill callee-saved registers.
@@ -1681,8 +1690,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1691,8 +1699,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil
Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1712,17 +1719,13 @@ void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
Literal* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
Literal* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
@@ -1835,8 +1838,19 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathMIPS64*>(instruction->GetSlowPath());
+
+ if (slow_path == nullptr) {
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
@@ -2543,7 +2557,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS64* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
Mips64Label non_zero;
@@ -2700,7 +2714,7 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2792,8 +2806,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -2956,7 +2970,7 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3430,7 +3444,7 @@ void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -5051,7 +5065,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(
instruction,
root,
/*entrypoint*/ temp);
@@ -5336,14 +5350,14 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
// above are expected to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetAllocator())
+ slow_path = new (GetScopedAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
}
AddSlowPath(slow_path);
@@ -5379,7 +5393,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeMIPS64* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -5415,7 +5429,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
@@ -5585,8 +5599,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5614,8 +5628,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -6083,7 +6097,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6201,7 +6215,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -6465,7 +6479,7 @@ void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
+ new (GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -7148,5 +7162,15 @@ void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet* instruct
}
}
+void LocationsBuilderMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
} // namespace mips64
} // namespace art
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index c5a39ff882..7a8c0ad025 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -91,17 +91,61 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar*
}
void LocationsBuilderMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Copy_sW(locations->Out().AsRegister<Register>(), src, 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Mfc1(locations->Out().AsRegisterPairLow<Register>(),
+ locations->InAt(0).AsFpuRegister<FRegister>());
+ __ MoveFromFpuHigh(locations->Out().AsRegisterPairHigh<Register>(),
+ locations->InAt(0).AsFpuRegister<FRegister>());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 4u);
+ DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector unary operations.
static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
LocationSummary* locations = new (allocator) LocationSummary(instruction);
- switch (instruction->GetPackedType()) {
+ DataType::Type type = instruction->GetPackedType();
+ switch (type) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
@@ -118,7 +162,8 @@ static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation
case DataType::Type::kFloat64:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
- (instruction->IsVecNeg() || instruction->IsVecAbs())
+ (instruction->IsVecNeg() || instruction->IsVecAbs() ||
+ (instruction->IsVecReduce() && type == DataType::Type::kInt64))
? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
@@ -133,7 +178,54 @@ void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ VectorRegister tmp = static_cast<VectorRegister>(FTMP);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ Hadd_sD(tmp, src, src);
+ __ IlvlD(dst, tmp, tmp);
+ __ AddvW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMin:
+ __ IlvodW(tmp, src, src);
+ __ Min_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Min_sW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMax:
+ __ IlvodW(tmp, src, src);
+ __ Max_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Max_sW(dst, dst, tmp);
+ break;
+ }
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ IlvlD(dst, src, src);
+ __ AddvD(dst, dst, src);
+ break;
+ case HVecReduce::kMin:
+ __ IlvlD(dst, src, src);
+ __ Min_sD(dst, dst, src);
+ break;
+ case HVecReduce::kMax:
+ __ IlvlD(dst, src, src);
+ __ Max_sD(dst, dst, src);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
@@ -831,11 +923,79 @@ void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ HInstruction* input = instruction->InputAt(0);
+ bool is_zero = IsZeroBitPattern(input);
+
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ // Zero out all other elements first.
+ __ FillW(dst, ZERO);
+
+ // Shorthand for any type of zero.
+ if (IsZeroBitPattern(instruction->InputAt(0))) {
+ return;
+ }
+
+ // Set required elements.
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ InsertB(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ InsertH(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ InsertW(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Mtc1(locations->InAt(0).AsRegisterPairLow<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ __ MoveToFpuHigh(locations->InAt(0).AsRegisterPairHigh<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector accumulations.
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index e606df2158..0c59b7344a 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -94,17 +94,58 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar
}
void LocationsBuilderMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Copy_sW(locations->Out().AsRegister<GpuRegister>(), src, 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Copy_sD(locations->Out().AsRegister<GpuRegister>(), src, 0);
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 4u);
+ DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector unary operations.
static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
LocationSummary* locations = new (allocator) LocationSummary(instruction);
- switch (instruction->GetPackedType()) {
+ DataType::Type type = instruction->GetPackedType();
+ switch (type) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
@@ -121,7 +162,8 @@ static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation
case DataType::Type::kFloat64:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
- (instruction->IsVecNeg() || instruction->IsVecAbs())
+ (instruction->IsVecNeg() || instruction->IsVecAbs() ||
+ (instruction->IsVecReduce() && type == DataType::Type::kInt64))
? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
@@ -136,7 +178,54 @@ void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ VectorRegister tmp = static_cast<VectorRegister>(FTMP);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ Hadd_sD(tmp, src, src);
+ __ IlvlD(dst, tmp, tmp);
+ __ AddvW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMin:
+ __ IlvodW(tmp, src, src);
+ __ Min_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Min_sW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMax:
+ __ IlvodW(tmp, src, src);
+ __ Max_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Max_sW(dst, dst, tmp);
+ break;
+ }
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ IlvlD(dst, src, src);
+ __ AddvD(dst, dst, src);
+ break;
+ case HVecReduce::kMin:
+ __ IlvlD(dst, src, src);
+ __ Min_sD(dst, dst, src);
+ break;
+ case HVecReduce::kMax:
+ __ IlvlD(dst, src, src);
+ __ Max_sD(dst, dst, src);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
@@ -835,11 +924,76 @@ void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ HInstruction* input = instruction->InputAt(0);
+ bool is_zero = IsZeroBitPattern(input);
+
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ // Zero out all other elements first.
+ __ FillW(dst, ZERO);
+
+ // Shorthand for any type of zero.
+ if (IsZeroBitPattern(instruction->InputAt(0))) {
+ return;
+ }
+
+ // Set required elements.
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ InsertB(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ InsertH(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ InsertW(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ InsertD(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector accumulations.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 7fb346582d..9b351605a4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1072,7 +1072,8 @@ void CodeGeneratorX86::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
+ __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
@@ -3582,7 +3583,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
GenerateDivRemWithAnyConstant(instruction);
}
} else {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86(
instruction, out.AsRegister<Register>(), is_div);
codegen_->AddSlowPath(slow_path);
@@ -3818,7 +3819,8 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5151,7 +5153,7 @@ void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5429,7 +5431,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5620,7 +5622,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5721,7 +5723,8 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio
SuspendCheckSlowPathX86* slow_path =
down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -6078,12 +6081,11 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
}
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
// Add a patch entry and return the label.
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
@@ -6173,7 +6175,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -6201,7 +6203,7 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -6263,12 +6265,11 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(
- StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -6308,7 +6309,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -6589,8 +6590,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6621,8 +6622,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6714,8 +6715,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
@@ -7158,7 +7159,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -7288,10 +7289,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
SlowPathCode* slow_path;
if (always_update_field) {
DCHECK(temp != nullptr);
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -7324,7 +7325,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCode* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7360,7 +7361,7 @@ void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7812,22 +7813,28 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(
- StringReference(&info.dex_file, dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const PatchInfo<Label>& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(
- TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
+void LocationsBuilderX86::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorX86::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
#undef __
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fb61e75d73..176e4dfda0 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -422,10 +422,10 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1dd1c015f4..8f7961ec6e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1277,8 +1277,8 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testq(CpuRegister(RAX), Address(
- CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
+ __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
@@ -3650,7 +3650,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
}
} else {
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
+ new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86_64(
instruction, out.AsRegister(), type, is_div);
codegen_->AddSlowPath(slow_path);
@@ -3819,7 +3819,7 @@ void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4604,7 +4604,7 @@ void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86_64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4866,7 +4866,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5045,7 +5045,8 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5166,7 +5167,8 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
SuspendCheckSlowPathX86_64* slow_path =
down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -5470,12 +5472,11 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
}
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(
- TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
// Add a patch entry and return the label.
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
@@ -5563,7 +5564,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5589,7 +5590,7 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5636,12 +5637,11 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(
- StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -5679,7 +5679,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -5969,8 +5969,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6001,8 +6001,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6096,8 +6096,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6522,7 +6522,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -6654,10 +6654,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
if (always_update_field) {
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -6690,7 +6690,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCode* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6726,7 +6726,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -6833,6 +6833,16 @@ void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_ins
__ jmp(temp_reg);
}
+void LocationsBuilderX86_64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorX86_64::VisitIntermediateAddress(HIntermediateAddress* instruction
+ ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
void CodeGeneratorX86_64::Load32BitValue(CpuRegister dest, int32_t value) {
if (value == 0) {
__ xorl(dest, dest);
@@ -7115,18 +7125,14 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(
- StringReference(&info.dex_file, dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const PatchInfo<Label>& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(
- TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6f67a45f25..00c5c27470 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -416,10 +416,10 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e35c7c734b..ba431a5b08 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -44,22 +44,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
// TODO: Should't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
@@ -825,7 +825,7 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- if (!CanExecute(kMips) || features_mips->IsR6()) {
+ if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
// HMipsComputeBaseMethodAddress and the NAL instruction behind it
// should only be generated on non-R6.
return;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index bcbcc12349..c41c290c8b 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -207,7 +207,7 @@ class InternalCodeAllocator : public CodeAllocator {
static bool CanExecuteOnHardware(InstructionSet target_isa) {
return (target_isa == kRuntimeISA)
// Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
- || (kRuntimeISA == kArm && target_isa == kThumb2);
+ || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2);
}
static bool CanExecute(InstructionSet target_isa) {
@@ -271,7 +271,7 @@ static void Run(const InternalCodeAllocator& allocator,
typedef Expected (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
- if (target_isa == kThumb2) {
+ if (target_isa == InstructionSet::kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
}
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 102acb3423..ed2f8e995d 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -342,7 +342,7 @@ inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_
}
inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
- DCHECK(HasShifterOperand(instruction, kArm64));
+ DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
// Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
// does *not* support extension. This is because the `extended register` form
// of the `sub` instruction interprets the left register with code 31 as the
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 3b67efe100..75a7fbe6ca 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -123,7 +123,7 @@ class DataType {
}
static bool IsUnsignedType(Type type) {
- return type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 36e932c67a..b63914faf7 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -238,14 +238,14 @@ TEST_F(EmitSwapMipsTest, TwoStackSlots) {
DataType::Type::kInt32,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 56($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 56($sp)\n"
+ "lw $v0, 68($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 68($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoStackSlots");
}
@@ -261,18 +261,18 @@ TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) {
DataType::Type::kInt64,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 60($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 60($sp)\n"
- "lw $v0, 64($sp)\n"
- "lw $t8, 56($sp)\n"
- "sw $v0, 56($sp)\n"
- "sw $t8, 64($sp)\n"
+ "lw $v0, 72($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 72($sp)\n"
+ "lw $v0, 76($sp)\n"
+ "lw $t8, 68($sp)\n"
+ "sw $v0, 68($sp)\n"
+ "sw $t8, 76($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoDoubleStackSlots");
}
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 3851877ae5..12c69889ab 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -153,7 +153,7 @@ class HGraphVisualizerDisassembler {
}
const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
- if (instruction_set_ == kThumb2) {
+ if (instruction_set_ == InstructionSet::kThumb2) {
// ARM and Thumb-2 use the same disassembler. The bottom bit of the
// address is used to distinguish between the two.
base += 1;
@@ -527,6 +527,10 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("packed_type") << vec_operation->GetPackedType();
}
+ void VisitVecMemoryOperation(HVecMemoryOperation* vec_mem_operation) OVERRIDE {
+ StartAttributeStream("alignment") << vec_mem_operation->GetAlignment().ToString();
+ }
+
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 902985e4ee..0f0be20961 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -382,16 +382,18 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
dex_file_->DecodeDebugPositionInfo(&code_item_, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_.Instructions();
- for (const Instruction& inst : instructions) {
- switch (inst.Opcode()) {
+ for (DexInstructionIterator it = instructions.begin(); it != instructions.end(); ++it) {
+ switch (it->Opcode()) {
case Instruction::MOVE_EXCEPTION: {
// Stop in native debugger after the exception has been moved.
// The compiler also expects the move at the start of basic block so
// we do not want to interfere by inserting native-debug-info before it.
- locations->ClearBit(inst.GetDexPc(code_item_.insns_));
- const Instruction* next = inst.Next();
- if (DexInstructionIterator(next) != instructions.end()) {
- locations->SetBit(next->GetDexPc(code_item_.insns_));
+ locations->ClearBit(it.DexPc());
+ DexInstructionIterator next = it;
+ ++next;
+ DCHECK(next != it);
+ if (next != instructions.end()) {
+ locations->SetBit(next.DexPc());
}
break;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d81a752853..2bd2d5f0a1 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -250,7 +250,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
DataType::Type type = mul->GetPackedType();
InstructionSet isa = codegen_->GetInstructionSet();
switch (isa) {
- case kArm64:
+ case InstructionSet::kArm64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
@@ -259,8 +259,8 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
return false;
}
break;
- case kMips:
- case kMips64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
@@ -1044,12 +1044,14 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
}
static bool IsTypeConversionLossless(DataType::Type input_type, DataType::Type result_type) {
+ // Make sure all implicit conversions have been simplified and no new ones have been introduced.
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << "," << result_type;
// The conversion to a larger type is loss-less with the exception of two cases,
// - conversion to the unsigned type Uint16, where we may lose some bits, and
// - conversion from float to long, the only FP to integral conversion with smaller FP type.
// For integral to FP conversions this holds because the FP mantissa is large enough.
// Note: The size check excludes Uint8 as the result type.
- DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type));
return DataType::Size(result_type) > DataType::Size(input_type) &&
result_type != DataType::Type::kUint16 &&
!(result_type == DataType::Type::kInt64 && input_type == DataType::Type::kFloat32);
@@ -1253,7 +1255,10 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
if (input_cst != nullptr) {
int64_t value = Int64FromConstant(input_cst);
- if (value == -1) {
+ if (value == -1 ||
+ // Similar cases under zero extension.
+ (DataType::IsUnsignedType(input_other->GetType()) &&
+ ((DataType::MaxValueOfIntegralType(input_other->GetType()) & ~value) == 0))) {
// Replace code looking like
// AND dst, src, 0xFFF...FF
// with
@@ -1332,6 +1337,9 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
TryReplaceFieldOrArrayGetType(input_other, new_type)) {
instruction->ReplaceWith(input_other);
instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (DataType::IsTypeConversionImplicit(input_other->GetType(), new_type)) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
} else {
HTypeConversion* type_conversion = new (GetGraph()->GetAllocator()) HTypeConversion(
new_type, input_other, instruction->GetDexPc());
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 9422f9f30c..d41e49a0f3 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -84,7 +84,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm));
DCHECK(use->IsBinaryOperation());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -166,7 +166,7 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoUsersShifterOperand(HInstructi
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -242,7 +242,7 @@ void InstructionSimplifierArmVisitor::VisitArraySet(HArraySet* instruction) {
}
void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c0ab68fec2..69e1463ac4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -90,7 +90,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm64));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm64));
DCHECK(use->IsBinaryOperation() || use->IsNeg());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -170,7 +170,7 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm64)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm64)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -218,7 +218,7 @@ void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
}
void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 1c13084a48..ccdcb3532d 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -90,13 +90,13 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
DataType::Type type = mul->GetType();
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
if (type != DataType::Type::kInt32) {
return false;
}
break;
- case kArm64:
+ case InstructionSet::kArm64:
if (!DataType::IsIntOrLongType(type)) {
return false;
}
@@ -148,7 +148,7 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
mul->GetBlock()->RemoveInstruction(mul);
return true;
}
- } else if (use->IsNeg() && isa != kArm) {
+ } else if (use->IsNeg() && isa != InstructionSet::kArm) {
HMultiplyAccumulate* mulacc =
new (allocator) HMultiplyAccumulate(type,
HInstruction::kSub,
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index b016a8769e..758fc7663d 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -41,7 +41,8 @@ inline bool CanFitInShifterOperand(HInstruction* instruction) {
inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
// On ARM64 `neg` instructions are an alias of `sub` using the zero register
// as the first register input.
- bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) ||
+ bool res = instr->IsAdd() || instr->IsAnd() ||
+ (isa == InstructionSet::kArm64 && instr->IsNeg()) ||
instr->IsOr() || instr->IsSub() || instr->IsXor();
return res;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 4429e6e5b7..bdeb261dbe 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -256,30 +256,63 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
<< " should have been converted to HIR"; \
}
-#define UNREACHABLE_INTRINSICS(Arch) \
-UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
-UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
-UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
-UNREACHABLE_INTRINSIC(Arch, LongCompare) \
-UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
-UNREACHABLE_INTRINSIC(Arch, LongSignum) \
-UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
-UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
-UNREACHABLE_INTRINSIC(Arch, StringLength) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleFullFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleAcquireFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleReleaseFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleLoadLoadFence) \
-UNREACHABLE_INTRINSIC(Arch, VarHandleStoreStoreFence)
+#define UNREACHABLE_INTRINSICS(Arch) \
+UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
+UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
+UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
+UNREACHABLE_INTRINSIC(Arch, LongCompare) \
+UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
+UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
+UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
+UNREACHABLE_INTRINSIC(Arch, StringLength) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleAcquireFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleReleaseFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleLoadLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleStoreStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, MethodHandleInvokeExact) \
+UNREACHABLE_INTRINSIC(Arch, MethodHandleInvoke) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchange) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchangeAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndExchangeRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleCompareAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAdd) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAddAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndAddRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAnd) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAndAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseAndRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOr) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOrAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseOrRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXor) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXorAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndBitwiseXorRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetAndSetRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetOpaque) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleGetVolatile) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetOpaque) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetRelease) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleSetVolatile) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSet) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetAcquire) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetPlain) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleWeakCompareAndSetRelease)
template <typename IntrinsicLocationsBuilder, typename Codegenerator>
bool IsCallFreeIntrinsic(HInvoke* invoke, Codegenerator* codegen) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ee07c4f65c..ef85f9ccc4 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1339,7 +1339,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
SlowPathCodeARM64* slow_path = nullptr;
const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
if (can_slow_path) {
- slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ Cbz(arg, slow_path->GetEntryLabel());
}
@@ -1702,7 +1702,6 @@ void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) {
static void GenerateVisitStringIndexOf(HInvoke* invoke,
MacroAssembler* masm,
CodeGeneratorARM64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1717,7 +1716,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) > 0xFFFFU) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1726,7 +1725,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
} else if (code_point->GetType() != DataType::Type::kUint16) {
Register char_reg = WRegisterFrom(locations->InAt(1));
__ Tst(char_reg, 0xFFFF0000);
- slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
}
@@ -1760,8 +1759,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1777,8 +1775,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1798,7 +1795,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke)
Register byte_array = WRegisterFrom(locations->InAt(0));
__ Cmp(byte_array, 0);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -1842,7 +1840,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke
Register string_to_copy = WRegisterFrom(locations->InAt(0));
__ Cmp(string_to_copy, 0);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2285,7 +2284,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
Location dst_pos = locations->InAt(3);
Location length = locations->InAt(4);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
// If source and destination are the same, take the slow path. Overlapping copy regions must be
@@ -2462,7 +2462,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
Register temp2 = WRegisterFrom(locations->GetTemp(1));
Location temp2_loc = LocationFrom(temp2);
- SlowPathCodeARM64* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
vixl::aarch64::Label conditions_on_positions_validated;
@@ -2839,7 +2840,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCodeARM64* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(
+ invoke, LocationFrom(tmp));
codegen_->AddSlowPath(read_barrier_slow_path);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 332306bebf..e0874d9549 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1490,7 +1490,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
SlowPathCodeARMVIXL* slow_path = nullptr;
const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
if (can_slow_path) {
- slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(arg, slow_path->GetEntryLabel());
}
@@ -1916,7 +1916,6 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
static void GenerateVisitStringIndexOf(HInvoke* invoke,
ArmVIXLAssembler* assembler,
CodeGeneratorARMVIXL* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1932,7 +1931,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1942,7 +1941,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
vixl32::Register char_reg = InputRegisterAt(invoke, 1);
// 0xffff is not modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
__ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1);
- slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
}
@@ -1977,8 +1976,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1994,8 +1992,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2013,7 +2010,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invok
ArmVIXLAssembler* assembler = GetAssembler();
vixl32::Register byte_array = InputRegisterAt(invoke, 0);
__ Cmp(byte_array, 0);
- SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2055,7 +2053,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invo
ArmVIXLAssembler* assembler = GetAssembler();
vixl32::Register string_to_copy = InputRegisterAt(invoke, 0);
__ Cmp(string_to_copy, 0);
- SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2190,7 +2189,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
Location temp3_loc = locations->GetTemp(2);
vixl32::Register temp3 = RegisterFrom(temp3_loc);
- SlowPathCodeARMVIXL* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
vixl32::Label conditions_on_positions_validated;
@@ -2496,7 +2496,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// Note that the base destination address is computed in `temp2`
// by the slow path code.
SlowPathCodeARMVIXL* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// Given the numeric representation, it's enough to check the low bit of the
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5f2f71bd4d..98ccce79e4 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2053,7 +2053,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) {
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(argument, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
@@ -2185,8 +2185,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
static void GenerateStringIndexOf(HInvoke* invoke,
bool start_at_zero,
MipsAssembler* assembler,
- CodeGeneratorMIPS* codegen,
- ArenaAllocator* allocator) {
+ CodeGeneratorMIPS* codegen) {
LocationSummary* locations = invoke->GetLocations();
Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;
@@ -2202,7 +2201,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
// full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -2219,7 +2218,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// two halfwords so we fallback to using the generic implementation
// of indexOf().
__ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
- slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen->AddSlowPath(slow_path);
__ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
}
@@ -2253,11 +2252,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke,
- /* start_at_zero */ true,
- GetAssembler(),
- codegen_,
- GetAllocator());
+ GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -2278,11 +2273,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke,
- /* start_at_zero */ false,
- GetAssembler(),
- codegen_,
- GetAllocator());
+ GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2303,7 +2294,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke)
LocationSummary* locations = invoke->GetLocations();
Register byte_array = locations->InAt(0).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(byte_array, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
@@ -2347,7 +2338,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke)
LocationSummary* locations = invoke->GetLocations();
Register string_to_copy = locations->InAt(0).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(string_to_copy, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
@@ -3059,7 +3050,7 @@ void IntrinsicCodeGeneratorMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
Register src_base = locations->GetTemp(1).AsRegister<Register>();
Register count = locations->GetTemp(2).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
@@ -3212,6 +3203,26 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+// static boolean java.lang.Thread.interrupted()
+void IntrinsicLocationsBuilderMIPS::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitThreadInterrupted(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ int32_t offset = Thread::InterruptedOffset<kMipsPointerSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
+ MipsLabel done;
+ __ Beqz(out, &done);
+ __ Sync(0);
+ __ StoreToOffset(kStoreWord, ZERO, TR, offset);
+ __ Sync(0);
+ __ Bind(&done);
+}
+
// Unimplemented intrinsics.
UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
@@ -3241,8 +3252,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, ThreadInterrupted)
-
UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8d5be80202..f62913430e 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1626,7 +1626,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(argument, slow_path->GetEntryLabel());
@@ -1754,7 +1755,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
static void GenerateStringIndexOf(HInvoke* invoke,
Mips64Assembler* assembler,
CodeGeneratorMIPS64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
@@ -1771,7 +1771,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
// full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1780,7 +1780,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
} else if (code_point->GetType() != DataType::Type::kUint16) {
GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
__ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
- slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen->AddSlowPath(slow_path);
__ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required
}
@@ -1816,7 +1816,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1834,8 +1834,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1856,7 +1855,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke
LocationSummary* locations = invoke->GetLocations();
GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(byte_array, slow_path->GetEntryLabel());
@@ -1903,7 +1903,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invok
LocationSummary* locations = invoke->GetLocations();
GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(string_to_copy, slow_path->GetEntryLabel());
@@ -2160,7 +2161,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
GpuRegister src_base = locations->GetTemp(1).AsRegister<GpuRegister>();
GpuRegister count = locations->GetTemp(2).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
@@ -2584,6 +2586,26 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+// static boolean java.lang.Thread.interrupted()
+void IntrinsicLocationsBuilderMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
+ int32_t offset = Thread::InterruptedOffset<kMips64PointerSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
+ Mips64Label done;
+ __ Beqzc(out, &done);
+ __ Sync(0);
+ __ StoreToOffset(kStoreWord, ZERO, TR, offset);
+ __ Sync(0);
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
@@ -2603,8 +2625,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS64, ThreadInterrupted)
-
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8b389ba876..8a0b6aeb0e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1253,7 +1253,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
Register count = locations->GetTemp(2).AsRegister<Register>();
DCHECK_EQ(count, ECX);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
@@ -1336,7 +1336,7 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
Register argument = locations->InAt(1).AsRegister<Register>();
__ testl(argument, argument);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1485,7 +1485,6 @@ static void CreateStringIndexOfLocations(HInvoke* invoke,
static void GenerateStringIndexOf(HInvoke* invoke,
X86Assembler* assembler,
CodeGeneratorX86* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1515,7 +1514,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1523,7 +1522,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
} else if (code_point->GetType() != DataType::Type::kUint16) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
- slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen->AddSlowPath(slow_path);
__ j(kAbove, slow_path->GetEntryLabel());
}
@@ -1640,7 +1639,7 @@ void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1648,8 +1647,7 @@ void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1669,7 +1667,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
Register byte_array = locations->InAt(0).AsRegister<Register>();
__ testl(byte_array, byte_array);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1713,7 +1711,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke)
Register string_to_copy = locations->InAt(0).AsRegister<Register>();
__ testl(string_to_copy, string_to_copy);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -2901,7 +2899,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
Location temp2_loc = locations->GetTemp(1);
Register temp2 = temp2_loc.AsRegister<Register>();
- SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
@@ -3215,7 +3214,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCode* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// We have done the "if" of the gray bit check above, now branch based on the flags.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6337900b71..92ffda427b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1033,7 +1033,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
CpuRegister count = locations->GetTemp(2).AsRegister<CpuRegister>();
DCHECK_EQ(count.AsRegister(), RCX);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same.
@@ -1175,7 +1175,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
CpuRegister temp3 = temp3_loc.AsRegister<CpuRegister>();
Location TMP_loc = Location::RegisterLocation(TMP);
- SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
@@ -1449,7 +1450,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCode* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// We have done the "if" of the gray bit check above, now branch based on the flags.
@@ -1510,7 +1511,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
__ testl(argument, argument);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1655,7 +1656,6 @@ static void CreateStringIndexOfLocations(HInvoke* invoke,
static void GenerateStringIndexOf(HInvoke* invoke,
X86_64Assembler* assembler,
CodeGeneratorX86_64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1683,7 +1683,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1691,7 +1691,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
} else if (code_point->GetType() != DataType::Type::kUint16) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
- slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
__ j(kAbove, slow_path->GetEntryLabel());
}
@@ -1800,7 +1800,7 @@ void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1808,8 +1808,7 @@ void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1829,7 +1828,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke
CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
__ testl(byte_array, byte_array);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1873,7 +1872,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok
CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
__ testl(string_to_copy, string_to_copy);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 5940ee755f..5a1df45914 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -196,8 +196,12 @@ class HeapLocationCollector : public HGraphVisitor {
}
HInstruction* HuntForOriginalReference(HInstruction* ref) const {
+ // An original reference can be transformed by instructions like:
+ // i0 NewArray
+ // i1 HInstruction(i0) <-- NullCheck, BoundType, IntermediateAddress.
+ // i2 ArrayGet(i1, index)
DCHECK(ref != nullptr);
- while (ref->IsNullCheck() || ref->IsBoundType()) {
+ while (ref->IsNullCheck() || ref->IsBoundType() || ref->IsIntermediateAddress()) {
ref = ref->InputAt(0);
}
return ref;
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 86696d02a1..b41e1e4d00 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -389,4 +389,68 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
}
+TEST_F(LoadStoreAnalysisTest, TestHuntOriginalRef) {
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+
+ // Different ways where orignal array reference are transformed & passed to ArrayGet.
+ // ParameterValue --> ArrayGet
+ // ParameterValue --> BoundType --> ArrayGet
+ // ParameterValue --> BoundType --> NullCheck --> ArrayGet
+ // ParameterValue --> BoundType --> NullCheck --> IntermediateAddress --> ArrayGet
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* bound_type = new (GetAllocator()) HBoundType(array);
+ HInstruction* array_get2 = new (GetAllocator()) HArrayGet(bound_type,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(bound_type, 0);
+ HInstruction* array_get3 = new (GetAllocator()) HArrayGet(null_check,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+
+ HInstruction* inter_addr = new (GetAllocator()) HIntermediateAddress(null_check, c1, 0);
+ HInstruction* array_get4 = new (GetAllocator()) HArrayGet(inter_addr,
+ c1,
+ DataType::Type::kInt32,
+ 0);
+ entry->AddInstruction(array);
+ entry->AddInstruction(array_get1);
+ entry->AddInstruction(bound_type);
+ entry->AddInstruction(array_get2);
+ entry->AddInstruction(null_check);
+ entry->AddInstruction(array_get3);
+ entry->AddInstruction(inter_addr);
+ entry->AddInstruction(array_get4);
+
+ HeapLocationCollector heap_location_collector(graph_);
+ heap_location_collector.VisitBasicBlock(entry);
+
+ // Test that the HeapLocationCollector should be able to tell
+ // that there is only ONE array location, no matter how many
+ // times the original reference has been transformed by BoundType,
+ // NullCheck, IntermediateAddress, etc.
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 1U);
+ size_t loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
+ size_t loc2 = heap_location_collector.GetArrayAccessHeapLocation(bound_type, c1);
+ size_t loc3 = heap_location_collector.GetArrayAccessHeapLocation(null_check, c1);
+ size_t loc4 = heap_location_collector.GetArrayAccessHeapLocation(inter_addr, c1);
+ ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
+ ASSERT_EQ(loc1, loc2);
+ ASSERT_EQ(loc1, loc3);
+ ASSERT_EQ(loc1, loc4);
+}
+
} // namespace art
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index af5585ec92..7dff696e32 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -39,13 +39,14 @@ static HInstruction* const kUnknownHeapValue =
static HInstruction* const kDefaultHeapValue =
reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-2));
-class LSEVisitor : public HGraphVisitor {
+// Use HGraphDelegateVisitor for which all VisitInvokeXXX() delegate to VisitInvoke().
+class LSEVisitor : public HGraphDelegateVisitor {
public:
LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_locations_collector,
const SideEffectsAnalysis& side_effects,
OptimizingCompilerStats* stats)
- : HGraphVisitor(graph, stats),
+ : HGraphDelegateVisitor(graph, stats),
heap_location_collector_(heap_locations_collector),
side_effects_(side_effects),
allocator_(graph->GetArenaStack()),
@@ -540,23 +541,7 @@ class LSEVisitor : public HGraphVisitor {
}
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) OVERRIDE {
HandleInvoke(invoke);
}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 69c6b94c6b..c672dae1d7 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -25,6 +25,8 @@
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_driver.h"
#include "linear_order.h"
+#include "mirror/array-inl.h"
+#include "mirror/string.h"
namespace art {
@@ -71,12 +73,25 @@ static inline void NormalizePackedType(/* inout */ DataType::Type* type,
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
-// All current SIMD targets want 16-byte alignment.
-static constexpr size_t kAlignedBase = 16;
-
// No loop unrolling factor (just one copy of the loop-body).
static constexpr uint32_t kNoUnrollingFactor = 1;
+//
+// Static helpers.
+//
+
+// Base alignment for arrays/strings guaranteed by the Android runtime.
+static uint32_t BaseAlignment() {
+ return kObjectAlignment;
+}
+
+// Hidden offset for arrays/strings guaranteed by the Android runtime.
+static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
+ return is_string_char_at
+ ? mirror::String::ValueOffset().Uint32Value()
+ : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
+}
+
// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
@@ -123,7 +138,7 @@ static bool IsSignExtensionAndGet(HInstruction* instruction,
/*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by sign
// extension when represented in the *width* of the given narrower data type
- // (the fact that Uint16 normally zero extends does not matter here).
+ // (the fact that Uint8/Uint16 normally zero extend does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
@@ -221,31 +236,6 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction,
return false;
}
}
- // A sign (or zero) extension followed by an explicit removal of just the
- // higher sign bits is equivalent to a zero extension of the underlying operand.
- //
- // TODO: move this into simplifier and use new type system instead.
- //
- if (instruction->IsAnd()) {
- int64_t mask = 0;
- HInstruction* a = instruction->InputAt(0);
- HInstruction* b = instruction->InputAt(1);
- // In (a & b) find (mask & b) or (a & mask) with sign or zero extension on the non-mask.
- if ((IsInt64AndGet(a, /*out*/ &mask) && (IsSignExtensionAndGet(b, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(b, type, /*out*/ operand))) ||
- (IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(a, type, /*out*/ operand)))) {
- switch ((*operand)->GetType()) {
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- return mask == std::numeric_limits<uint8_t>::max();
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return mask == std::numeric_limits<uint16_t>::max();
- default: return false;
- }
- }
- }
// An explicit widening conversion of an unsigned expression zero-extends.
if (instruction->IsTypeConversion()) {
HInstruction* conv = instruction->InputAt(0);
@@ -277,10 +267,15 @@ static bool IsNarrowerOperands(HInstruction* a,
/*out*/ HInstruction** r,
/*out*/ HInstruction** s,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r) && IsSignExtensionAndGet(b, type, s)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r) && IsZeroExtensionAndGet(b, type, s)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
*is_unsigned = true;
return true;
}
@@ -292,10 +287,15 @@ static bool IsNarrowerOperand(HInstruction* a,
DataType::Type type,
/*out*/ HInstruction** r,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r)) {
*is_unsigned = true;
return true;
}
@@ -303,7 +303,7 @@ static bool IsNarrowerOperand(HInstruction* a,
}
// Compute relative vector length based on type difference.
-static size_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, size_t vl) {
+static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
DCHECK(DataType::IsIntegralType(other_type));
DCHECK(DataType::IsIntegralType(vector_type));
DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
@@ -410,7 +410,7 @@ static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
} else if (reduction->IsVecMax()) {
return HVecReduce::kMax;
}
- LOG(FATAL) << "Unsupported SIMD reduction";
+ LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
UNREACHABLE();
}
@@ -461,7 +461,8 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
simplified_(false),
vector_length_(0),
vector_refs_(nullptr),
- vector_peeling_candidate_(nullptr),
+ vector_static_peeling_factor_(0),
+ vector_dynamic_peeling_candidate_(nullptr),
vector_runtime_test_a_(nullptr),
vector_runtime_test_b_(nullptr),
vector_map_(nullptr),
@@ -761,7 +762,8 @@ bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int6
// Reset vector bookkeeping.
vector_length_ = 0;
vector_refs_->clear();
- vector_peeling_candidate_ = nullptr;
+ vector_static_peeling_factor_ = 0;
+ vector_dynamic_peeling_candidate_ = nullptr;
vector_runtime_test_a_ =
vector_runtime_test_b_= nullptr;
@@ -778,10 +780,17 @@ bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int6
}
}
- // Does vectorization seem profitable?
- if (!IsVectorizationProfitable(trip_count)) {
- return false;
- }
+ // Prepare alignment analysis:
+ // (1) find desired alignment (SIMD vector size in bytes).
+ // (2) initialize static loop peeling votes (peeling factor that will
+ // make one particular reference aligned), never to exceed (1).
+ // (3) variable to record how many references share same alignment.
+ // (4) variable to record suitable candidate for dynamic loop peeling.
+ uint32_t desired_alignment = GetVectorSizeInBytes();
+ DCHECK_LE(desired_alignment, 16u);
+ uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ uint32_t max_num_same_alignment = 0;
+ const ArrayReference* peeling_candidate = nullptr;
// Data dependence analysis. Find each pair of references with same type, where
// at least one is a write. Each such pair denotes a possible data dependence.
@@ -789,9 +798,10 @@ bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int6
// aliased, as well as the property that references either point to the same
// array or to two completely disjoint arrays, i.e., no partial aliasing.
// Other than a few simply heuristics, no detailed subscript analysis is done.
- // The scan over references also finds a suitable dynamic loop peeling candidate.
- const ArrayReference* candidate = nullptr;
+ // The scan over references also prepares finding a suitable alignment strategy.
for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
+ uint32_t num_same_alignment = 0;
+ // Scan over all next references.
for (auto j = i; ++j != vector_refs_->end(); ) {
if (i->type == j->type && (i->lhs || j->lhs)) {
// Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
@@ -805,6 +815,10 @@ bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int6
if (x != y) {
return false;
}
+ // Count the number of references that have the same alignment (since
+ // base and offset are the same) and where at least one is a write, so
+ // e.g. a[i] = a[i] + b[i] counts a[i] but not b[i]).
+ num_same_alignment++;
} else {
// Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
// Conservatively assume a potential loop-carried data dependence otherwise, avoided by
@@ -823,10 +837,38 @@ bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int6
}
}
}
- }
+ // Update information for finding suitable alignment strategy:
+ // (1) update votes for static loop peeling,
+ // (2) update suitable candidate for dynamic loop peeling.
+ Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
+ if (alignment.Base() >= desired_alignment) {
+ // If the array/string object has a known, sufficient alignment, use the
+ // initial offset to compute the static loop peeling vote (this always
+ // works, since elements have natural alignment).
+ uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
+ uint32_t vote = (offset == 0)
+ ? 0
+ : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
+ DCHECK_LT(vote, 16u);
+ ++peeling_votes[vote];
+ } else if (BaseAlignment() >= desired_alignment &&
+ num_same_alignment > max_num_same_alignment) {
+ // Otherwise, if the array/string object has a known, sufficient alignment
+ // for just the base but with an unknown offset, record the candidate with
+ // the most occurrences for dynamic loop peeling (again, the peeling always
+ // works, since elements have natural alignment).
+ max_num_same_alignment = num_same_alignment;
+ peeling_candidate = &(*i);
+ }
+ } // for i
+
+ // Find a suitable alignment strategy.
+ SetAlignmentStrategy(peeling_votes, peeling_candidate);
- // Consider dynamic loop peeling for alignment.
- SetPeelingCandidate(candidate, trip_count);
+ // Does vectorization seem profitable?
+ if (!IsVectorizationProfitable(trip_count)) {
+ return false;
+ }
// Success!
return true;
@@ -843,9 +885,12 @@ void HLoopOptimization::Vectorize(LoopNode* node,
uint32_t unroll = GetUnrollingFactor(block, trip_count);
uint32_t chunk = vector_length_ * unroll;
+ DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));
+
// A cleanup loop is needed, at least, for any unknown trip count or
// for a known trip count with remainder iterations after vectorization.
- bool needs_cleanup = trip_count == 0 || (trip_count % chunk) != 0;
+ bool needs_cleanup = trip_count == 0 ||
+ ((trip_count - vector_static_peeling_factor_) % chunk) != 0;
// Adjust vector bookkeeping.
HPhi* main_phi = nullptr;
@@ -859,21 +904,40 @@ void HLoopOptimization::Vectorize(LoopNode* node,
DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
<< induc_type;
- // Generate dynamic loop peeling trip count, if needed, under the assumption
- // that the Android runtime guarantees at least "component size" alignment:
- // ptc = (ALIGN - (&a[initial] % ALIGN)) / type-size
+ // Generate the trip count for static or dynamic loop peeling, if needed:
+ // ptc = <peeling factor>;
HInstruction* ptc = nullptr;
- if (vector_peeling_candidate_ != nullptr) {
- DCHECK_LT(vector_length_, trip_count) << "dynamic peeling currently requires known trip count";
- //
- // TODO: Implement this. Compute address of first access memory location and
- // compute peeling factor to obtain kAlignedBase alignment.
- //
- needs_cleanup = true;
+ if (vector_static_peeling_factor_ != 0) {
+ // Static loop peeling for SIMD alignment (using the most suitable
+ // fixed peeling factor found during prior alignment analysis).
+ DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
+ ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
+ } else if (vector_dynamic_peeling_candidate_ != nullptr) {
+ // Dynamic loop peeling for SIMD alignment (using the most suitable
+ // candidate found during prior alignment analysis):
+ // rem = offset % ALIGN; // adjusted as #elements
+ // ptc = rem == 0 ? 0 : (ALIGN - rem);
+ uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
+ uint32_t align = GetVectorSizeInBytes() >> shift;
+ uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
+ vector_dynamic_peeling_candidate_->is_string_char_at);
+ HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
+ HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
+ induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
+ HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
+ induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
+ HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
+ induc_type, graph_->GetConstant(induc_type, align), rem));
+ HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
+ rem, graph_->GetConstant(induc_type, 0)));
+ ptc = Insert(preheader, new (global_allocator_) HSelect(
+ cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
+ needs_cleanup = true; // don't know the exact amount
}
// Generate loop control:
// stc = <trip-count>;
+ // ptc = min(stc, ptc);
// vtc = stc - (stc - ptc) % chunk;
// i = 0;
HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
@@ -882,6 +946,10 @@ void HLoopOptimization::Vectorize(LoopNode* node,
DCHECK(IsPowerOfTwo(chunk));
HInstruction* diff = stc;
if (ptc != nullptr) {
+ if (trip_count == 0) {
+ HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
+ ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
+ }
diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
}
HInstruction* rem = Insert(
@@ -904,9 +972,13 @@ void HLoopOptimization::Vectorize(LoopNode* node,
needs_cleanup = true;
}
- // Generate dynamic peeling loop for alignment, if needed:
+ // Generate alignment peeling loop, if needed:
// for ( ; i < ptc; i += 1)
// <loop-body>
+ //
+ // NOTE: The alignment forced by the peeling loop is preserved even if data is
+ // moved around during suspend checks, since all analysis was based on
+ // nothing more than the Android runtime alignment conventions.
if (ptc != nullptr) {
vector_mode_ = kSequential;
GenerateNewLoop(node,
@@ -1133,7 +1205,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
GenerateVecSub(index, offset);
GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
} else {
- vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false));
+ vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
}
return true;
}
@@ -1159,10 +1231,9 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
DataType::Type from = conversion->GetInputType();
DataType::Type to = conversion->GetResultType();
if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
- size_t size_vec = DataType::Size(type);
- size_t size_from = DataType::Size(from);
- size_t size_to = DataType::Size(to);
- DataType::Type ctype = size_from == size_vec ? from : type;
+ uint32_t size_vec = DataType::Size(type);
+ uint32_t size_from = DataType::Size(from);
+ uint32_t size_to = DataType::Size(to);
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1172,7 +1243,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
(size_to >= size_from &&
size_from >= size_vec &&
- VectorizeUse(node, opa, generate_code, ctype, restrictions))) {
+ VectorizeUse(node, opa, generate_code, type, restrictions))) {
if (generate_code) {
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
@@ -1341,11 +1412,21 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
return false;
}
+uint32_t HLoopOptimization::GetVectorSizeInBytes() {
+ switch (compiler_driver_->GetInstructionSet()) {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ return 8; // 64-bit SIMD
+ default:
+ return 16; // 128-bit SIMD
+ }
+}
+
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
// Allow vectorization for all ARM devices, because Android assumes that
// ARM 32-bit always supports advanced SIMD (64-bit SIMD).
switch (type) {
@@ -1365,7 +1446,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
break;
}
return false;
- case kArm64:
+ case InstructionSet::kArm64:
// Allow vectorization for all ARM devices, because Android assumes that
// ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
switch (type) {
@@ -1393,8 +1474,8 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
default:
return false;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
// Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
switch (type) {
@@ -1425,7 +1506,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
} // switch type
}
return false;
- case kMips:
+ case InstructionSet::kMips:
if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -1438,10 +1519,10 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
*restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(4);
case DataType::Type::kInt64:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(2);
case DataType::Type::kFloat32:
*restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN)
@@ -1454,7 +1535,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
} // switch type
}
return false;
- case kMips64:
+ case InstructionSet::kMips64:
if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -1467,10 +1548,10 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
*restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(4);
case DataType::Type::kInt64:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(2);
case DataType::Type::kFloat32:
*restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN)
@@ -1553,12 +1634,13 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org,
HInstruction* vector = nullptr;
if (vector_mode_ == kVector) {
// Vector store or load.
+ bool is_string_char_at = false;
HInstruction* base = org->InputAt(0);
if (opb != nullptr) {
vector = new (global_allocator_) HVecStore(
global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
} else {
- bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
+ is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HVecLoad(global_allocator_,
base,
opa,
@@ -1568,22 +1650,29 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org,
is_string_char_at,
dex_pc);
}
- // Known dynamically enforced alignment?
- if (vector_peeling_candidate_ != nullptr &&
- vector_peeling_candidate_->base == base &&
- vector_peeling_candidate_->offset == offset) {
- vector->AsVecMemoryOperation()->SetAlignment(Alignment(kAlignedBase, 0));
+ // Known (forced/adjusted/original) alignment?
+ if (vector_dynamic_peeling_candidate_ != nullptr) {
+ if (vector_dynamic_peeling_candidate_->offset == offset && // TODO: diffs too?
+ DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
+ vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
+ vector->AsVecMemoryOperation()->SetAlignment( // forced
+ Alignment(GetVectorSizeInBytes(), 0));
+ }
+ } else {
+ vector->AsVecMemoryOperation()->SetAlignment( // adjusted/original
+ ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
}
} else {
// Scalar store or load.
DCHECK(vector_mode_ == kSequential);
if (opb != nullptr) {
+ DataType::Type component_type = org->AsArraySet()->GetComponentType();
vector = new (global_allocator_) HArraySet(
- org->InputAt(0), opa, opb, type, org->GetSideEffects(), dex_pc);
+ org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
} else {
bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HArrayGet(
- org->InputAt(0), opa, type, org->GetSideEffects(), dex_pc, is_string_char_at);
+ org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
}
}
vector_map_->Put(org, vector);
@@ -1627,7 +1716,7 @@ void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* r
// a [initial, initial, .., initial] vector for min/max.
HVecOperation* red_vector = new_red->AsVecOperation();
HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
- size_t vector_length = red_vector->GetVectorLength();
+ uint32_t vector_length = red_vector->GetVectorLength();
DataType::Type type = red_vector->GetPackedType();
if (kind == HVecReduce::ReductionKind::kSum) {
new_init = Insert(vector_preheader_,
@@ -1659,9 +1748,9 @@ void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* r
HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
if (instruction->IsPhi()) {
HInstruction* input = instruction->InputAt(1);
- if (input->IsVecOperation()) {
+ if (input->IsVecOperation() && !input->IsVecExtractScalar()) {
HVecOperation* input_vector = input->AsVecOperation();
- size_t vector_length = input_vector->GetVectorLength();
+ uint32_t vector_length = input_vector->GetVectorLength();
DataType::Type type = input_vector->GetPackedType();
HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
@@ -1789,7 +1878,7 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
break;
}
default:
- LOG(FATAL) << "Unsupported SIMD intrinsic";
+ LOG(FATAL) << "Unsupported SIMD intrinsic " << org->GetId();
UNREACHABLE();
} // switch invoke
} else {
@@ -2020,35 +2109,72 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
// Vectorization heuristics.
//
+Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
+ DataType::Type type,
+ bool is_string_char_at,
+ uint32_t peeling) {
+ // Combine the alignment and hidden offset that is guaranteed by
+ // the Android runtime with a known starting index adjusted as bytes.
+ int64_t value = 0;
+ if (IsInt64AndGet(offset, /*out*/ &value)) {
+ uint32_t start_offset =
+ HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
+ return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
+ }
+ // Otherwise, the Android runtime guarantees at least natural alignment.
+ return Alignment(DataType::Size(type), 0);
+}
+
+void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
+ const ArrayReference* peeling_candidate) {
+ // Current heuristic: pick the best static loop peeling factor, if any,
+ // or otherwise use dynamic loop peeling on suggested peeling candidate.
+ uint32_t max_vote = 0;
+ for (int32_t i = 0; i < 16; i++) {
+ if (peeling_votes[i] > max_vote) {
+ max_vote = peeling_votes[i];
+ vector_static_peeling_factor_ = i;
+ }
+ }
+ if (max_vote == 0) {
+ vector_dynamic_peeling_candidate_ = peeling_candidate;
+ }
+}
+
+uint32_t HLoopOptimization::MaxNumberPeeled() {
+ if (vector_dynamic_peeling_candidate_ != nullptr) {
+ return vector_length_ - 1u; // worst-case
+ }
+ return vector_static_peeling_factor_; // known exactly
+}
+
bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
- // Current heuristic: non-empty body with sufficient number
- // of iterations (if known).
+ // Current heuristic: non-empty body with sufficient number of iterations (if known).
// TODO: refine by looking at e.g. operation count, alignment, etc.
+ // TODO: trip count is really an unsigned entity, provided the guarding test
+ // is satisfied; deal with this more carefully later
+ uint32_t max_peel = MaxNumberPeeled();
if (vector_length_ == 0) {
return false; // nothing found
- } else if (0 < trip_count && trip_count < vector_length_) {
+ } else if (trip_count < 0) {
+ return false; // guard against non-taken/large
+ } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
return false; // insufficient iterations
}
return true;
}
-void HLoopOptimization::SetPeelingCandidate(const ArrayReference* candidate,
- int64_t trip_count ATTRIBUTE_UNUSED) {
- // Current heuristic: none.
- // TODO: implement
- vector_peeling_candidate_ = candidate;
-}
-
static constexpr uint32_t ARM64_SIMD_MAXIMUM_UNROLL_FACTOR = 8;
static constexpr uint32_t ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE = 50;
uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
+ uint32_t max_peel = MaxNumberPeeled();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm64: {
+ case InstructionSet::kArm64: {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length_, 0u);
- if (trip_count < 2 * vector_length_) {
+ if (trip_count < (2 * vector_length_ + max_peel)) {
return kNoUnrollingFactor;
}
// Don't unroll for large loop body size.
@@ -2060,14 +2186,14 @@ uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_
// - At least one iteration of the transformed loop should be executed.
// - The loop body shouldn't be "too big" (heuristic).
uint32_t uf1 = ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE / instruction_count;
- uint32_t uf2 = trip_count / vector_length_;
+ uint32_t uf2 = (trip_count - max_peel) / vector_length_;
uint32_t unroll_factor =
TruncToPowerOfTwo(std::min({uf1, uf2, ARM64_SIMD_MAXIMUM_UNROLL_FACTOR}));
DCHECK_GE(unroll_factor, 1u);
return unroll_factor;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
default:
return kNoUnrollingFactor;
}
@@ -2127,7 +2253,7 @@ bool HLoopOptimization::TrySetPhiReduction(HPhi* phi) {
HInstruction* reduction = inputs[1];
if (HasReductionFormat(reduction, phi)) {
HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
- int32_t use_count = 0;
+ uint32_t use_count = 0;
bool single_use_inside_loop =
// Reduction update only used by phi.
reduction->GetUses().HasExactlyOneElement() &&
@@ -2220,7 +2346,7 @@ bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
bool collect_loop_uses,
- /*out*/ int32_t* use_count) {
+ /*out*/ uint32_t* use_count) {
// Deal with regular uses.
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
@@ -2291,7 +2417,7 @@ bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
// Assigning the last value is always successful if there are no uses.
// Otherwise, it succeeds in a no early-exit loop by generating the
// proper last value assignment.
- int32_t use_count = 0;
+ uint32_t use_count = 0;
return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
(use_count == 0 ||
(!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 768fe554e3..51e0a986b8 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -94,20 +94,24 @@ class HLoopOptimization : public HOptimization {
* Representation of a unit-stride array reference.
*/
struct ArrayReference {
- ArrayReference(HInstruction* b, HInstruction* o, DataType::Type t, bool l)
- : base(b), offset(o), type(t), lhs(l) { }
+ ArrayReference(HInstruction* b, HInstruction* o, DataType::Type t, bool l, bool c = false)
+ : base(b), offset(o), type(t), lhs(l), is_string_char_at(c) { }
bool operator<(const ArrayReference& other) const {
return
(base < other.base) ||
(base == other.base &&
(offset < other.offset || (offset == other.offset &&
(type < other.type ||
- (type == other.type && lhs < other.lhs)))));
+ (type == other.type &&
+ (lhs < other.lhs ||
+ (lhs == other.lhs &&
+ is_string_char_at < other.is_string_char_at)))))));
}
- HInstruction* base; // base address
- HInstruction* offset; // offset + i
- DataType::Type type; // component type
- bool lhs; // def/use
+ HInstruction* base; // base address
+ HInstruction* offset; // offset + i
+ DataType::Type type; // component type
+ bool lhs; // def/use
+ bool is_string_char_at; // compressed string read
};
//
@@ -152,6 +156,7 @@ class HLoopOptimization : public HOptimization {
bool generate_code,
DataType::Type type,
uint64_t restrictions);
+ uint32_t GetVectorSizeInBytes();
bool TrySetVectorType(DataType::Type type, /*out*/ uint64_t* restrictions);
bool TrySetVectorLength(uint32_t length);
void GenerateVecInv(HInstruction* org, DataType::Type type);
@@ -183,8 +188,14 @@ class HLoopOptimization : public HOptimization {
uint64_t restrictions);
// Vectorization heuristics.
+ Alignment ComputeAlignment(HInstruction* offset,
+ DataType::Type type,
+ bool is_string_char_at,
+ uint32_t peeling = 0);
+ void SetAlignmentStrategy(uint32_t peeling_votes[],
+ const ArrayReference* peeling_candidate);
+ uint32_t MaxNumberPeeled();
bool IsVectorizationProfitable(int64_t trip_count);
- void SetPeelingCandidate(const ArrayReference* candidate, int64_t trip_count);
uint32_t GetUnrollingFactor(HBasicBlock* block, int64_t trip_count);
//
@@ -202,7 +213,7 @@ class HLoopOptimization : public HOptimization {
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
bool collect_loop_uses,
- /*out*/ int32_t* use_count);
+ /*out*/ uint32_t* use_count);
bool IsUsedOutsideLoop(HLoopInformation* loop_info,
HInstruction* instruction);
bool TryReplaceWithLastValue(HLoopInformation* loop_info,
@@ -254,8 +265,9 @@ class HLoopOptimization : public HOptimization {
// Contents reside in phase-local heap memory.
ScopedArenaSet<ArrayReference>* vector_refs_;
- // Dynamic loop peeling candidate for alignment.
- const ArrayReference* vector_peeling_candidate_;
+ // Static or dynamic loop peeling for alignment.
+ uint32_t vector_static_peeling_factor_;
+ const ArrayReference* vector_dynamic_peeling_candidate_;
// Dynamic data dependence test of the form a != b.
HInstruction* vector_runtime_test_a_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 88609ea790..29c78a1e34 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1334,6 +1334,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(InstanceFieldSet, Instruction) \
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
+ M(IntermediateAddress, Instruction) \
M(InvokeUnresolved, Invoke) \
M(InvokeInterface, Invoke) \
M(InvokeStaticOrDirect, Invoke) \
@@ -1418,7 +1419,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(BitwiseNegatedRight, Instruction) \
M(DataProcWithShifterOp, Instruction) \
M(MultiplyAccumulate, Instruction) \
- M(IntermediateAddress, Instruction) \
M(IntermediateAddressIndex, Instruction)
#endif
@@ -6966,6 +6966,38 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
+// never used across anything that can trigger GC.
+// The result of this instruction is not a pointer in the sense of `DataType::Type::kReference`.
+// So we represent it by the type `DataType::Type::kInt32`.
+class HIntermediateAddress FINAL : public HExpression<2> {
+ public:
+ HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+ : HExpression(DataType::Type::kInt32, SideEffects::DependsOnGC(), dex_pc) {
+ DCHECK_EQ(DataType::Size(DataType::Type::kInt32),
+ DataType::Size(DataType::Type::kReference))
+ << "kPrimInt and kPrimNot have different sizes.";
+ SetRawInputAt(0, base_address);
+ SetRawInputAt(1, offset);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+ bool IsActualObject() const OVERRIDE { return false; }
+
+ HInstruction* GetBaseAddress() const { return InputAt(0); }
+ HInstruction* GetOffset() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(IntermediateAddress);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
+};
+
+
} // namespace art
#include "nodes_vector.h"
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 14cbf85c3f..7b4f5f7cbb 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -118,38 +118,6 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight);
};
-
-// This instruction computes an intermediate address pointing in the 'middle' of an object. The
-// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
-// never used across anything that can trigger GC.
-// The result of this instruction is not a pointer in the sense of `DataType::Type::kreference`.
-// So we represent it by the type `DataType::Type::kInt`.
-class HIntermediateAddress FINAL : public HExpression<2> {
- public:
- HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
- : HExpression(DataType::Type::kInt32, SideEffects::DependsOnGC(), dex_pc) {
- DCHECK_EQ(DataType::Size(DataType::Type::kInt32),
- DataType::Size(DataType::Type::kReference))
- << "kPrimInt and kPrimNot have different sizes.";
- SetRawInputAt(0, base_address);
- SetRawInputAt(1, offset);
- }
-
- bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
- return true;
- }
- bool IsActualObject() const OVERRIDE { return false; }
-
- HInstruction* GetBaseAddress() const { return InputAt(0); }
- HInstruction* GetOffset() const { return InputAt(1); }
-
- DECLARE_INSTRUCTION(IntermediateAddress);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
-};
-
// This instruction computes part of the array access offset (data and index offset).
//
// For array accesses the element address has the following structure:
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 4e78e4e6a2..17540b9770 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -136,6 +136,20 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size unsigned type and leaves other types alone.
+ static DataType::Type ToUnsignedType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kBool: // 1-byte storage unit
+ case DataType::Type::kInt8:
+ return DataType::Type::kUint8;
+ case DataType::Type::kInt16:
+ return DataType::Type::kUint16;
+ default:
+ DCHECK(type != DataType::Type::kVoid && type != DataType::Type::kReference) << type;
+ return type;
+ }
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
protected:
@@ -254,6 +268,8 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type
}
DCHECK(input->IsVecOperation());
DataType::Type input_type = input->AsVecOperation()->GetPackedType();
+ DCHECK_EQ(HVecOperation::ToUnsignedType(input_type) == HVecOperation::ToUnsignedType(type),
+ HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type));
return HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type);
}
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bd65cbf25e..4ad29961be 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -63,6 +63,7 @@ class OptimizingCFITest : public CFITest {
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
+ code_gen_->InitializeCodeGenerationData();
const int frame_size = 64;
int core_reg = 0;
int fp_reg = 0;
@@ -152,15 +153,15 @@ class OptimizingCFITest : public CFITest {
InternalCodeAllocator code_allocator_;
};
-#define TEST_ISA(isa) \
- TEST_F(OptimizingCFITest, isa) { \
- std::vector<uint8_t> expected_asm( \
- expected_asm_##isa, \
- expected_asm_##isa + arraysize(expected_asm_##isa)); \
- std::vector<uint8_t> expected_cfi( \
- expected_cfi_##isa, \
- expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
- TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa) \
+ TEST_F(OptimizingCFITest, isa) { \
+ std::vector<uint8_t> expected_asm( \
+ expected_asm_##isa, \
+ expected_asm_##isa + arraysize(expected_asm_##isa)); \
+ std::vector<uint8_t> expected_cfi( \
+ expected_cfi_##isa, \
+ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+ TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \
}
#ifdef ART_ENABLE_CODEGEN_arm
@@ -203,7 +204,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kThumb2_adjust,
expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
- SetUpFrame(kThumb2);
+ SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
->GetAssembler())->GetVIXLAssembler()->
vixl32::Label target;
@@ -215,7 +216,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif
@@ -234,7 +235,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips_adjust,
expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
- SetUpFrame(kMips);
+ SetUpFrame(InstructionSet::kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
mips::MipsLabel target;
__ Beqz(mips::A0, &target);
@@ -245,7 +246,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif
@@ -264,7 +265,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips64_adjust,
expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
- SetUpFrame(kMips64);
+ SetUpFrame(InstructionSet::kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
mips64::Mips64Label target;
__ Beqc(mips64::A1, mips64::A2, &target);
@@ -275,7 +276,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index fde55cb92f..1e82c4b0f7 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -330,10 +330,10 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x08, 0x00, 0x80, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x80, 0x14, 0xF0, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
- 0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
+ 0x09, 0x00, 0x20, 0x00, 0x10, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F, 0x34, 0x00, 0xB0, 0x8F,
@@ -342,7 +342,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x50, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu sp, sp, -64
@@ -356,8 +356,8 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: sdc1 f22, +40(sp)
// 0x00000014: sdc1 f20, +32(sp)
// 0x00000018: bnez a0, 0x0000003c ; +36
-// 0x0000001c: addiu sp, sp, -4
-// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x0000001c: addiu sp, sp, -16
+// 0x00000020: .cfi_def_cfa_offset: 80
// 0x00000020: sw ra, +0(sp)
// 0x00000024: nal
// 0x00000028: lui at, 2
@@ -365,7 +365,7 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000030: addu at, at, ra
// 0x00000034: lw ra, +0(sp)
// 0x00000038: jr at
-// 0x0000003c: addiu sp, sp, 4
+// 0x0000003c: addiu sp, sp, 16
// 0x00000040: .cfi_def_cfa_offset: 64
// 0x00000040: nop
// ...
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 42f32b7866..9233eb5baf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -437,13 +437,13 @@ bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
}
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
// Strip pass name suffix to get optimization name.
@@ -637,8 +637,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::InstructionSimplifierArm* simplifier =
new (allocator) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -657,7 +657,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -676,7 +676,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
mips::InstructionSimplifierMips* simplifier =
new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -695,7 +695,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -708,7 +708,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -727,7 +727,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
- DCHECK_NE(instruction_set, kArm);
+ DCHECK_NE(instruction_set, InstructionSet::kArm);
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
@@ -1142,6 +1142,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
if (kArenaAllocatorCountAllocations) {
+ codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting.
size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
@@ -1251,18 +1252,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
if (codegen.get() == nullptr) {
return false;
}
-
- if (kArenaAllocatorCountAllocations) {
- size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
- if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(allocator.GetMemStats());
- MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
- << dex_file->PrettyMethod(method_idx)
- << "\n" << Dumpable<MemStats>(mem_stats)
- << "\n" << Dumpable<MemStats>(peak_stats);
- }
- }
}
size_t stack_map_size = 0;
@@ -1357,6 +1346,19 @@ bool OptimizingCompiler::JitCompile(Thread* self,
jit_logger->WriteLog(code, code_allocator.GetSize(), method);
}
+ if (kArenaAllocatorCountAllocations) {
+ codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting.
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file->PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
+ }
+ }
+
return true;
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index cb9dc42572..7246129e25 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -612,7 +612,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
// The field is unknown only during tests.
if (info.GetField() != nullptr) {
- klass = info.GetField()->GetType<false>();
+ klass = info.GetField()->LookupType();
}
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 5ed9e0243f..1d3fe0334d 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -16,6 +16,7 @@
#include "register_allocation_resolver.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "linear_order.h"
#include "ssa_liveness_analysis.h"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 86e971353f..bad73e1b61 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,13 +70,13 @@ RegisterAllocator::~RegisterAllocator() {
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kThumb2
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
class AllRangesIterator : public ValueObject {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 57eb7623b1..8cc376c3a6 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -796,7 +796,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::HSchedulerARM64 scheduler(&allocator, selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
@@ -804,8 +804,8 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
}
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index dfc1633fe6..75dce81550 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -43,22 +43,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
// TODO: Should't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7eb2188a28..9bc80457a3 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -43,9 +43,12 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
current_entry_.dex_method_index = dex::kDexNoIndex;
current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
- current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
- ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
- : nullptr;
+ current_entry_.dex_register_entry.live_dex_registers_mask = nullptr;
+ if (num_dex_registers != 0u) {
+ current_entry_.dex_register_entry.live_dex_registers_mask =
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+ current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+ }
if (sp_mask != nullptr) {
stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
}
@@ -121,9 +124,12 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
current_inline_info_.dex_pc = dex_pc;
current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
- current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
- ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
- : nullptr;
+ current_inline_info_.dex_register_entry.live_dex_registers_mask = nullptr;
+ if (num_dex_registers != 0) {
+ current_inline_info_.dex_register_entry.live_dex_registers_mask =
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+ current_inline_info_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+ }
current_dex_register_ = 0;
}
@@ -468,7 +474,7 @@ size_t StackMapStream::AddDexRegisterMapEntry(const DexRegisterMapEntry& entry)
if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
// We don't have a perfect hash functions so we need a list to collect all stack maps
// which might have the same dex register map.
- ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
stack_map_indices.push_back(current_entry_index);
dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
} else {
@@ -546,7 +552,7 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info,
size_t StackMapStream::PrepareRegisterMasks() {
register_masks_.resize(stack_maps_.size(), 0u);
- ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
const size_t index = dedupe.size();
stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
@@ -558,7 +564,7 @@ size_t StackMapStream::PrepareRegisterMasks() {
void StackMapStream::PrepareMethodIndices() {
CHECK(method_indices_.empty());
method_indices_.resize(stack_maps_.size() + inline_infos_.size());
- ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
const size_t index = dedupe.size();
const uint32_t method_index = stack_map.dex_method_index;
@@ -584,11 +590,11 @@ size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
// For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later
// when copying out from stack_masks_.
- ArenaUnorderedMap<MemoryRegion,
- size_t,
- FNVHash<MemoryRegion>,
- MemoryRegion::ContentEquals> dedup(
- stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<MemoryRegion,
+ size_t,
+ FNVHash<MemoryRegion>,
+ MemoryRegion::ContentEquals> dedup(
+ stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
size_t index = dedup.size();
MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 62ed7ee0e5..e126609dba 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
-#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/hash_map.h"
+#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "memory_region.h"
#include "method_info.h"
@@ -60,8 +60,7 @@ class DexRegisterLocationHashFn {
*/
class StackMapStream : public ValueObject {
public:
- explicit StackMapStream(ArenaAllocator* allocator,
- InstructionSet instruction_set)
+ explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
: allocator_(allocator),
instruction_set_(instruction_set),
stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -223,37 +222,37 @@ class StackMapStream : public ValueObject {
size_t dex_register_locations_index) const;
void CheckCodeInfo(MemoryRegion region) const;
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
const InstructionSet instruction_set_;
- ArenaVector<StackMapEntry> stack_maps_;
+ ScopedArenaVector<StackMapEntry> stack_maps_;
// A catalog of unique [location_kind, register_value] pairs (per method).
- ArenaVector<DexRegisterLocation> location_catalog_entries_;
+ ScopedArenaVector<DexRegisterLocation> location_catalog_entries_;
// Map from Dex register location catalog entries to their indices in the
// location catalog.
- using LocationCatalogEntriesIndices = ArenaHashMap<DexRegisterLocation,
- size_t,
- LocationCatalogEntriesIndicesEmptyFn,
- DexRegisterLocationHashFn>;
+ using LocationCatalogEntriesIndices = ScopedArenaHashMap<DexRegisterLocation,
+ size_t,
+ LocationCatalogEntriesIndicesEmptyFn,
+ DexRegisterLocationHashFn>;
LocationCatalogEntriesIndices location_catalog_entries_indices_;
// A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`.
- ArenaVector<size_t> dex_register_locations_;
- ArenaVector<InlineInfoEntry> inline_infos_;
- ArenaVector<uint8_t> stack_masks_;
- ArenaVector<uint32_t> register_masks_;
- ArenaVector<uint32_t> method_indices_;
- ArenaVector<DexRegisterMapEntry> dex_register_entries_;
+ ScopedArenaVector<size_t> dex_register_locations_;
+ ScopedArenaVector<InlineInfoEntry> inline_infos_;
+ ScopedArenaVector<uint8_t> stack_masks_;
+ ScopedArenaVector<uint32_t> register_masks_;
+ ScopedArenaVector<uint32_t> method_indices_;
+ ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
- ArenaSafeMap<uint32_t, ArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
+ ScopedArenaSafeMap<uint32_t, ScopedArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
StackMapEntry current_entry_;
InlineInfoEntry current_inline_info_;
- ArenaVector<uint8_t> code_info_encoding_;
+ ScopedArenaVector<uint8_t> code_info_encoding_;
size_t needed_size_;
uint32_t current_dex_register_;
bool in_inline_frame_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 96ac368ac3..7e517f3485 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,7 +47,8 @@ using Kind = DexRegisterLocation::Kind;
TEST(StackMapTest, Test1) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -128,7 +129,8 @@ TEST(StackMapTest, Test1) {
TEST(StackMapTest, Test2) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -412,7 +414,8 @@ TEST(StackMapTest, Test2) {
TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -506,7 +509,8 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -585,7 +589,8 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
// not treat it as kNoDexRegisterMap.
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -648,7 +653,8 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -706,7 +712,8 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -755,7 +762,8 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -920,23 +928,30 @@ TEST(StackMapTest, InlineTest) {
TEST(StackMapTest, CodeOffsetTest) {
// Test minimum alignments, encoding, and decoding.
- CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
- CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
- CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
- CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
- CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
- CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
- EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
- EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
- EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
- EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
- EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
- EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+ CodeOffset offset_thumb2 =
+ CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+ CodeOffset offset_arm64 =
+ CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
+ CodeOffset offset_x86 =
+ CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
+ CodeOffset offset_x86_64 =
+ CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+ CodeOffset offset_mips =
+ CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
+ CodeOffset offset_mips64 =
+ CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
+ EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
+ EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
}
TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, true);
@@ -964,7 +979,8 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
TEST(StackMapTest, TestInvokeInfo) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, true);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 9527a60976..921d401849 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -247,15 +247,15 @@ std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet is
ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
+ case InstructionSet::kArm64:
return arm64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
+ case InstructionSet::kMips64:
return mips64::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
+ case InstructionSet::kX86_64:
return x86_64::CreateTrampoline(&allocator, offset);
#endif
default:
@@ -273,16 +273,16 @@ std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet is
ArenaAllocator allocator(&pool);
switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return arm::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
+ case InstructionSet::kMips:
return mips::CreateTrampoline(&allocator, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
+ case InstructionSet::kX86:
UNUSED(abi);
return x86::CreateTrampoline(&allocator, offset);
#endif
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index e239004506..c13c9af819 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -232,7 +232,7 @@ class ArmVIXLJNIMacroAssembler FINAL
class ArmVIXLJNIMacroLabel FINAL
: public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
vixl32::Label,
- kArm> {
+ InstructionSet::kArm> {
public:
vixl32::Label* AsArm() {
return AsPlatformLabel();
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index fda87aa573..ce39a13692 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -235,7 +235,7 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
class Arm64JNIMacroLabel FINAL
: public JNIMacroLabelCommon<Arm64JNIMacroLabel,
vixl::aarch64::Label,
- kArm64> {
+ InstructionSet::kArm64> {
public:
vixl::aarch64::Label* AsArm64() {
return AsPlatformLabel();
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index ae7636b106..ad84412ef5 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -650,6 +650,24 @@ class AssemblerTest : public testing::Test {
}
template <typename ImmType>
+ std::string RepeatRVIb(void (Ass::*f)(Reg, VecReg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0,
+ int multiplier = 1) {
+ return RepeatTemplatedRegistersImmBits<Reg, VecReg, ImmType>(
+ f,
+ imm_bits,
+ GetRegisters(),
+ GetVectorRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetVecRegName,
+ fmt,
+ bias,
+ multiplier);
+ }
+
+ template <typename ImmType>
std::string RepeatVVIb(void (Ass::*f)(VecReg, VecReg, ImmType),
int imm_bits,
const std::string& fmt,
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5307d17bb0..655d17d4fb 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -81,7 +81,7 @@ std::string GetToolsDir() {
if (toolsdir.empty()) {
setup_results();
- toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
+ toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(InstructionSet::kThumb2);
SetAndroidData();
}
@@ -215,10 +215,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
is_synchronized,
is_critical_native,
shorty,
- kThumb2));
+ InstructionSet::kThumb2));
std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
ManagedRuntimeCallingConvention::Create(
- &allocator, is_static, is_synchronized, shorty, kThumb2));
+ &allocator, is_static, is_synchronized, shorty, InstructionSet::kThumb2));
const int frame_size(jni_conv->FrameSize());
ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 0616b35a39..3f7691b6a8 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -56,12 +56,12 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
+ case InstructionSet::kMips:
return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
allocator,
instruction_set_features != nullptr
@@ -69,7 +69,7 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
+ case InstructionSet::kX86:
return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
#endif
default:
@@ -91,11 +91,11 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
+ case InstructionSet::kArm64:
return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
+ case InstructionSet::kMips64:
return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
allocator,
instruction_set_features != nullptr
@@ -103,7 +103,7 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
+ case InstructionSet::kX86_64:
return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
#endif
default:
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e85645b446..9545ca6869 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -1863,20 +1863,20 @@ void MipsAssembler::Not(Register rd, Register rs) {
}
void MipsAssembler::Push(Register rs) {
- IncreaseFrameSize(kMipsWordSize);
+ IncreaseFrameSize(kStackAlignment);
Sw(rs, SP, 0);
}
void MipsAssembler::Pop(Register rd) {
Lw(rd, SP, 0);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
}
void MipsAssembler::PopAndReturn(Register rd, Register rt) {
bool reordering = SetReorder(false);
Lw(rd, SP, 0);
Jr(rt);
- DecreaseFrameSize(kMipsWordSize); // Single instruction in delay slot.
+ DecreaseFrameSize(kStackAlignment); // Single instruction in delay slot.
SetReorder(reordering);
}
@@ -2800,6 +2800,74 @@ void MipsAssembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
static_cast<FRegister>(ws));
}
+void MipsAssembler::Copy_sB(Register rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrRf(EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_sH(Register rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrRf(EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_sW(Register rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ DsFsmInstrRf(EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_uB(Register rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrRf(EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_uH(Register rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrRf(EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::InsertB(VectorRegister wd, Register rs, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrFffr(EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::InsertH(VectorRegister wd, Register rs, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrFffr(
+ EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::InsertW(VectorRegister wd, Register rs, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ DsFsmInstrFffr(EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
void MipsAssembler::FillB(VectorRegister wd, Register rs) {
CHECK(HasMsa());
DsFsmInstrFr(EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e),
@@ -2921,6 +2989,38 @@ void MipsAssembler::StD(VectorRegister wd, Register rs, int offset) {
rs);
}
+void MipsAssembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14),
@@ -2953,6 +3053,70 @@ void MipsAssembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister w
static_cast<FRegister>(wt));
}
+void MipsAssembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12),
@@ -3049,6 +3213,54 @@ void MipsAssembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister
static_cast<FRegister>(wt));
}
+void MipsAssembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::ReplicateFPToVectorRegister(VectorRegister dst,
FRegister src,
bool is_double) {
@@ -4376,7 +4588,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
Addu(AT, AT, RA);
Lw(RA, SP, 0);
Jr(AT);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
break;
case Branch::kLongCondBranch:
// The comment on case 'Branch::kLongUncondBranch' applies here as well.
@@ -4396,7 +4608,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
Addu(AT, AT, RA);
Lw(RA, SP, 0);
Jr(AT);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
break;
case Branch::kLongCall:
DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 1c5b442557..c0ea29fbd7 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -601,6 +601,14 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void Copy_sB(Register rd, VectorRegister ws, int n4);
+ void Copy_sH(Register rd, VectorRegister ws, int n3);
+ void Copy_sW(Register rd, VectorRegister ws, int n2);
+ void Copy_uB(Register rd, VectorRegister ws, int n4);
+ void Copy_uH(Register rd, VectorRegister ws, int n3);
+ void InsertB(VectorRegister wd, Register rs, int n4);
+ void InsertH(VectorRegister wd, Register rs, int n3);
+ void InsertW(VectorRegister wd, Register rs, int n2);
void FillB(VectorRegister wd, Register rs);
void FillH(VectorRegister wd, Register rs);
void FillW(VectorRegister wd, Register rs);
@@ -618,10 +626,22 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void StW(VectorRegister wd, Register rs, int offset);
void StD(VectorRegister wd, Register rs, int offset);
+ void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
@@ -636,6 +656,13 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FRegister src, bool is_double);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index b12b6b651c..c76a568ddd 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -2185,6 +2185,46 @@ TEST_F(AssemblerMIPS32r6Test, SplatiD) {
"splati.d");
}
+TEST_F(AssemblerMIPS32r6Test, Copy_sB) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_sH) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_sW) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_uB) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_uH) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertB) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
+ "insert.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertH) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
+ "insert.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertW) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
+ "insert.w");
+}
+
TEST_F(AssemblerMIPS32r6Test, FillB) {
DriverStr(RepeatVR(&mips::MipsAssembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
}
@@ -2251,6 +2291,22 @@ TEST_F(AssemblerMIPS32r6Test, StD) {
"st.d");
}
+TEST_F(AssemblerMIPS32r6Test, IlvlB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"), "ilvl.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"), "ilvl.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"), "ilvl.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"), "ilvl.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, IlvrB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"), "ilvr.b");
}
@@ -2267,6 +2323,46 @@ TEST_F(AssemblerMIPS32r6Test, IlvrD) {
DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"), "ilvr.d");
}
+TEST_F(AssemblerMIPS32r6Test, IlvevB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, MaddvB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
"maddv.b");
@@ -2287,6 +2383,36 @@ TEST_F(AssemblerMIPS32r6Test, MaddvD) {
"maddv.d");
}
+TEST_F(AssemblerMIPS32r6Test, Hadd_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, MsubvB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
"msubv.b");
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 9397be4c09..b027d3a549 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2803,7 +2803,7 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
oss <<
".set noreorder\n"
"addiu $t0, $t1, 0x5678\n"
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $ra, 0($sp)\n"
"bltzal $zero, .+4\n"
"lui $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
@@ -2811,11 +2811,11 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
"addu $at, $at, $ra\n"
"lw $ra, 0($sp)\n"
"jalr $zero, $at\n"
- "addiu $sp, $sp, 4\n" <<
+ "addiu $sp, $sp, 16\n" <<
RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
"addiu $t0, $t1, 0x5678\n"
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $ra, 0($sp)\n"
"bltzal $zero, .+4\n"
"lui $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
@@ -2823,7 +2823,7 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
"addu $at, $at, $ra\n"
"lw $ra, 0($sp)\n"
"jalr $zero, $at\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
std::string expected = oss.str();
DriverStr(expected, "LongBranchReorder");
EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 0 * 4u);
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 606d4c39d0..d8a4531ac2 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1874,6 +1874,72 @@ void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
}
+void Mips64Assembler::Copy_sB(GpuRegister rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sH(GpuRegister rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sW(GpuRegister rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sD(GpuRegister rd, VectorRegister ws, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ EmitMsaELM(0x2, n1 | kMsaDfNDoublewordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uB(GpuRegister rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uH(GpuRegister rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uW(GpuRegister rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x3, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::InsertB(VectorRegister wd, GpuRegister rs, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertH(VectorRegister wd, GpuRegister rs, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertW(VectorRegister wd, GpuRegister rs, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertD(VectorRegister wd, GpuRegister rs, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ EmitMsaELM(0x4, n1 | kMsaDfNDoublewordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
CHECK(HasMsa());
EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
@@ -1972,6 +2038,26 @@ void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
}
+void Mips64Assembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14);
+}
+
void Mips64Assembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14);
@@ -1992,6 +2078,46 @@ void Mips64Assembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister
EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
}
+void Mips64Assembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14);
+}
+
void Mips64Assembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12);
@@ -2052,6 +2178,36 @@ void Mips64Assembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegiste
EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b);
}
+void Mips64Assembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15);
+}
+
void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
FpuRegister src,
bool is_double) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index a3787ac6ae..d67fb0054d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -785,6 +785,17 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void Copy_sB(GpuRegister rd, VectorRegister ws, int n4);
+ void Copy_sH(GpuRegister rd, VectorRegister ws, int n3);
+ void Copy_sW(GpuRegister rd, VectorRegister ws, int n2);
+ void Copy_sD(GpuRegister rd, VectorRegister ws, int n1);
+ void Copy_uB(GpuRegister rd, VectorRegister ws, int n4);
+ void Copy_uH(GpuRegister rd, VectorRegister ws, int n3);
+ void Copy_uW(GpuRegister rd, VectorRegister ws, int n2);
+ void InsertB(VectorRegister wd, GpuRegister rs, int n4);
+ void InsertH(VectorRegister wd, GpuRegister rs, int n3);
+ void InsertW(VectorRegister wd, GpuRegister rs, int n2);
+ void InsertD(VectorRegister wd, GpuRegister rs, int n1);
void FillB(VectorRegister wd, GpuRegister rs);
void FillH(VectorRegister wd, GpuRegister rs);
void FillW(VectorRegister wd, GpuRegister rs);
@@ -803,10 +814,22 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void StW(VectorRegister wd, GpuRegister rs, int offset);
void StD(VectorRegister wd, GpuRegister rs, int offset);
+ void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
@@ -821,6 +844,13 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index bf0326de87..164af7891c 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -3399,6 +3399,61 @@ TEST_F(AssemblerMIPS64Test, SplatiD) {
"splati.d");
}
+TEST_F(AssemblerMIPS64Test, Copy_sB) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sH) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sW) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sD) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sD, 1, "copy_s.d ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uB) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uH) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uW) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uW, 2, "copy_u.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertB) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
+ "insert.b");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertH) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
+ "insert.h");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertW) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
+ "insert.w");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertD) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertD, 1, "insert.d ${reg1}[{imm}], ${reg2}"),
+ "insert.d");
+}
+
TEST_F(AssemblerMIPS64Test, FillB) {
DriverStr(RepeatVR(&mips64::Mips64Assembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
}
@@ -3469,6 +3524,26 @@ TEST_F(AssemblerMIPS64Test, StD) {
"st.d");
}
+TEST_F(AssemblerMIPS64Test, IlvlB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.d");
+}
+
TEST_F(AssemblerMIPS64Test, IlvrB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"),
"ilvr.b");
@@ -3489,6 +3564,46 @@ TEST_F(AssemblerMIPS64Test, IlvrD) {
"ilvr.d");
}
+TEST_F(AssemblerMIPS64Test, IlvevB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.d");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.d");
+}
+
TEST_F(AssemblerMIPS64Test, MaddvB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
"maddv.b");
@@ -3509,6 +3624,36 @@ TEST_F(AssemblerMIPS64Test, MaddvD) {
"maddv.d");
}
+TEST_F(AssemblerMIPS64Test, Hadd_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.d");
+}
+
TEST_F(AssemblerMIPS64Test, MsubvB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
"msubv.b");
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 56eaf1951e..99219d8f88 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -171,7 +171,7 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi
class X86JNIMacroLabel FINAL
: public JNIMacroLabelCommon<X86JNIMacroLabel,
art::Label,
- kX86> {
+ InstructionSet::kX86> {
public:
art::Label* AsX86() {
return AsPlatformLabel();
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d1a3032a56..d766ad4716 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -197,7 +197,7 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
class X86_64JNIMacroLabel FINAL
: public JNIMacroLabelCommon<X86_64JNIMacroLabel,
art::Label,
- kX86_64> {
+ InstructionSet::kX86_64> {
public:
art::Label* AsX86_64() {
return AsPlatformLabel();
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index a93b0e7f0c..13d2655917 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -39,6 +39,10 @@ art_cc_defaults {
"liblz4",
"liblzma",
],
+ include_dirs: [
+ "external/lz4/lib",
+ "external/zlib",
+ ],
export_include_dirs: ["."],
// For SHA-1 checksumming of build ID
@@ -220,6 +224,9 @@ art_cc_test {
"linker/oat_writer_test.cc",
],
header_libs: ["dex2oat_headers"],
+ include_dirs: [
+ "external/zlib",
+ ],
shared_libs: [
"libartd-compiler",
"libartd-dexlayout",
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 3abf36f326..9fa7f697d9 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -43,6 +43,7 @@
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/macros.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
@@ -448,6 +449,12 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" --dirty-image-objects=<directory-path>: list of known dirty objects in the image.");
UsageError(" The image writer will group them together.");
UsageError("");
+ UsageError(" --compact-dex-level=none|fast: None avoids generating compact dex, fast");
+ UsageError(" generates compact dex with low compile time.");
+ UsageError("");
+ UsageError(" --deduplicate-code=true|false: enable|disable code deduplication. Deduplicated");
+ UsageError(" code will have an arbitrary symbol tagged with [DEDUPED].");
+ UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
@@ -585,7 +592,7 @@ class Dex2Oat FINAL {
public:
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
- instruction_set_(kRuntimeISA == kArm ? kThumb2 : kRuntimeISA),
+ instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA),
// Take the default set of instruction features from the build.
image_file_location_oat_checksum_(0),
image_file_location_oat_data_begin_(0),
@@ -889,13 +896,13 @@ class Dex2Oat FINAL {
// Checks are all explicit until we know the architecture.
// Set the compilation target's implicit checks options.
switch (instruction_set_) {
- case kArm:
- case kThumb2:
- case kArm64:
- case kX86:
- case kX86_64:
- case kMips:
- case kMips64:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
compiler_options_->implicit_null_checks_ = true;
compiler_options_->implicit_so_checks_ = true;
break;
@@ -1163,6 +1170,7 @@ class Dex2Oat FINAL {
std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+ AssignIfExists(args, M::CompactDexLevel, &compact_dex_level_);
AssignIfExists(args, M::DexFiles, &dex_filenames_);
AssignIfExists(args, M::DexLocations, &dex_locations_);
AssignIfExists(args, M::OatFiles, &oat_filenames_);
@@ -2226,8 +2234,12 @@ class Dex2Oat FINAL {
return UseProfile();
}
+ bool DoGenerateCompactDex() const {
+ return compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone;
+ }
+
bool DoDexLayoutOptimizations() const {
- return DoProfileGuidedOptimizations();
+ return DoProfileGuidedOptimizations() || DoGenerateCompactDex();
}
bool DoOatLayoutOptimizations() const {
@@ -2450,7 +2462,8 @@ class Dex2Oat FINAL {
oat_writers_.emplace_back(new linker::OatWriter(
IsBootImage(),
timings_,
- do_oat_writer_layout ? profile_compilation_info_.get() : nullptr));
+ do_oat_writer_layout ? profile_compilation_info_.get() : nullptr,
+ compact_dex_level_));
}
}
@@ -2814,6 +2827,7 @@ class Dex2Oat FINAL {
// Dex files we are compiling, does not include the class path dex files.
std::vector<const DexFile*> dex_files_;
std::string no_inline_from_string_;
+ CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
std::vector<std::unique_ptr<linker::ElfWriter>> elf_writers_;
std::vector<std::unique_ptr<linker::OatWriter>> oat_writers_;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index ae7ebe2da1..a02fbf862f 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -24,6 +24,7 @@
#include "common_runtime_test.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
@@ -65,6 +66,7 @@ class Dex2oatImageTest : public CommonRuntimeTest {
std::string error_msg;
CHECK(DexFileLoader::Open(dex.c_str(),
dex,
+ /*verify*/ true,
/*verify_checksum*/ false,
&error_msg,
&dex_files))
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index 43e6c4d02f..2cf070155f 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -27,7 +27,7 @@ template<>
struct CmdlineType<InstructionSet> : CmdlineTypeParser<InstructionSet> {
Result Parse(const std::string& option) {
InstructionSet set = GetInstructionSetFromString(option.c_str());
- if (set == kNone) {
+ if (set == InstructionSet::kNone) {
return Result::Failure(std::string("Not a valid instruction set: '") + option + "'");
}
return Result::Success(set);
@@ -239,6 +239,11 @@ static Parser CreateArgumentParser() {
.Define("--class-loader-context=_")
.WithType<std::string>()
.IntoKey(M::ClassLoaderContext)
+ .Define("--compact-dex-level=_")
+ .WithType<CompactDexLevel>()
+ .WithValueMap({{"none", CompactDexLevel::kCompactDexLevelNone},
+ {"fast", CompactDexLevel::kCompactDexLevelFast}})
+ .IntoKey(M::CompactDexLevel)
.Define("--runtime-arg _")
.WithType<std::vector<std::string>>().AppendValues()
.IntoKey(M::RuntimeOptions);
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index 83a3035ed5..9362a3df6f 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -34,6 +34,7 @@
//
// Parse-able keys from the command line.
+DEX2OAT_OPTIONS_KEY (CompactDexLevel, CompactDexLevel)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexFiles)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexLocations)
DEX2OAT_OPTIONS_KEY (int, ZipFd)
diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h
index a4c718625f..f8198ee08b 100644
--- a/dex2oat/dex2oat_options.h
+++ b/dex2oat/dex2oat_options.h
@@ -22,6 +22,7 @@
#include <vector>
#include "base/variant_map.h"
+#include "cdex/compact_dex_level.h"
#include "cmdline_types.h" // TODO: don't need to include this file here
#include "compiler.h"
#include "driver/compiler_options_map.h"
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 1b731fc7f6..99be111165 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -678,7 +678,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location,
@@ -812,7 +813,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -823,13 +825,13 @@ class Dex2oatLayoutTest : public Dex2oatTest {
ASSERT_LT(class_def_count, std::numeric_limits<uint16_t>::max());
ASSERT_GE(class_def_count, 2U);
- // The new layout swaps the classes at indexes 0 and 1.
+ // Make sure the indexes stay the same.
std::string old_class0 = old_dex_file->PrettyType(old_dex_file->GetClassDef(0).class_idx_);
std::string old_class1 = old_dex_file->PrettyType(old_dex_file->GetClassDef(1).class_idx_);
std::string new_class0 = new_dex_file->PrettyType(new_dex_file->GetClassDef(0).class_idx_);
std::string new_class1 = new_dex_file->PrettyType(new_dex_file->GetClassDef(1).class_idx_);
- EXPECT_EQ(old_class0, new_class1);
- EXPECT_EQ(old_class1, new_class0);
+ EXPECT_EQ(old_class0, new_class0);
+ EXPECT_EQ(old_class1, new_class1);
}
EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
@@ -966,6 +968,7 @@ class Dex2oatWatchdogTest : public Dex2oatTest {
std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
copy.push_back("--swap-file=" + swap_location);
+ copy.push_back("-j512"); // Excessive idle threads just slow down dex2oat.
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kSpeed,
@@ -1372,9 +1375,19 @@ TEST_F(Dex2oatTest, LayoutSections) {
EXPECT_LT(code_item_offset - section_startup_only.offset_, section_startup_only.size_);
++startup_count;
} else {
- // If no flags are set, the method should be unused.
- EXPECT_LT(code_item_offset - section_unused.offset_, section_unused.size_);
- ++unused_count;
+ if (code_item_offset - section_unused.offset_ < section_unused.size_) {
+ // If no flags are set, the method should be unused ...
+ ++unused_count;
+ } else {
+ // or this method is part of the last code item and the end is 4 byte aligned.
+ ClassDataItemIterator it2(*dex_file, dex_file->GetClassData(*class_def));
+ it2.SkipAllFields();
+ for (; it2.HasNextDirectMethod() || it2.HasNextVirtualMethod(); it2.Next()) {
+ EXPECT_LE(it2.GetMethodCodeItemOffset(), code_item_offset);
+ }
+ uint32_t code_item_size = dex_file->FindCodeItemOffset(*class_def, method_idx);
+ EXPECT_EQ((code_item_offset + code_item_size) % 4, 0u);
+ }
}
}
DCHECK(!it.HasNext());
@@ -1385,4 +1398,119 @@ TEST_F(Dex2oatTest, LayoutSections) {
}
}
+// Test that generating compact dex works.
+TEST_F(Dex2oatTest, GenerateCompactDex) {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ // Generate a compact dex based odex.
+ const std::string dir = GetScratchDir();
+ const std::string oat_filename = dir + "/base.oat";
+ const std::string vdex_filename = dir + "/base.vdex";
+ std::string error_msg;
+ const int res = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ oat_filename,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--compact-dex-level=fast"});
+ EXPECT_EQ(res, 0);
+ // Open our generated oat file.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_filename.c_str(),
+ oat_filename.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex->GetLocation().c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file != nullptr);
+ std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
+ ASSERT_EQ(oat_dex_files.size(), 1u);
+ // Check that each dex is a compact dex.
+ for (const OatDexFile* oat_dex : oat_dex_files) {
+ std::unique_ptr<const DexFile> dex_file(oat_dex->OpenDexFile(&error_msg));
+ ASSERT_TRUE(dex_file != nullptr) << error_msg;
+ ASSERT_TRUE(dex_file->IsCompactDexFile());
+ }
+}
+
+class Dex2oatVerifierAbort : public Dex2oatTest {};
+
+TEST_F(Dex2oatVerifierAbort, HardFail) {
+ // Use VerifierDeps as it has hard-failing classes.
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("VerifierDeps"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ std::string error_msg;
+ const int res_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--abort-on-hard-verifier-error"});
+ EXPECT_NE(0, res_fail);
+
+ const int res_no_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--no-abort-on-hard-verifier-error"});
+ EXPECT_EQ(0, res_no_fail);
+}
+
+TEST_F(Dex2oatVerifierAbort, SoftFail) {
+ // Use VerifierDepsMulti as it has soft-failing classes.
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("VerifierDepsMulti"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ std::string error_msg;
+ const int res_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--abort-on-soft-verifier-error"});
+ EXPECT_NE(0, res_fail);
+
+ const int res_no_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--no-abort-on-soft-verifier-error"});
+ EXPECT_EQ(0, res_no_fail);
+}
+
+class Dex2oatDedupeCode : public Dex2oatTest {};
+
+TEST_F(Dex2oatDedupeCode, DedupeTest) {
+ // Use MyClassNatives. It has lots of native methods that will produce deduplicate-able code.
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("MyClassNatives"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ size_t no_dedupe_size = 0;
+ GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kSpeed,
+ { "--deduplicate-code=false" },
+ true, // expect_success
+ false, // use_fd
+ [&no_dedupe_size](const OatFile& o) {
+ no_dedupe_size = o.Size();
+ });
+
+ size_t dedupe_size = 0;
+ GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kSpeed,
+ { "--deduplicate-code=true" },
+ true, // expect_success
+ false, // use_fd
+ [&dedupe_size](const OatFile& o) {
+ dedupe_size = o.Size();
+ });
+
+ EXPECT_LT(dedupe_size, no_dedupe_size);
+}
+
} // namespace art
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 93f5a1d6cf..b139a12fd4 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -227,7 +227,8 @@ void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
if (bss_size_ != 0u) {
builder_->GetBss()->WriteNoBitsSection(bss_size_);
}
- if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) {
+ if (builder_->GetIsa() == InstructionSet::kMips ||
+ builder_->GetIsa() == InstructionSet::kMips64) {
builder_->WriteMIPSabiflagsSection();
}
builder_->WriteDynamicSection();
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 9f8ed77526..8427e7b8ce 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -16,6 +16,7 @@
#include "elf_file.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "common_compiler_test.h"
#include "elf_file.h"
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 492c76bc54..d3d42b98bb 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -26,6 +26,7 @@
#include "android-base/stringprintf.h"
#include "art_method-inl.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
@@ -247,7 +248,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
elf_writers.back()->Start();
oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true,
&timings,
- /*profile_compilation_info*/nullptr));
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone));
}
std::vector<OutputStream*> rodata;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 17ceca3662..d8671d2dd0 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -330,7 +330,10 @@ class OatWriter::OatDexFile {
DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " offset_=" << offset_
-OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCompilationInfo* info)
+OatWriter::OatWriter(bool compiling_boot_image,
+ TimingLogger* timings,
+ ProfileCompilationInfo* info,
+ CompactDexLevel compact_dex_level)
: write_state_(WriteState::kAddingDexFileSources),
timings_(timings),
raw_dex_files_(),
@@ -404,7 +407,8 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
size_method_bss_mappings_(0u),
relative_patcher_(nullptr),
absolute_patch_locations_(),
- profile_compilation_info_(info) {
+ profile_compilation_info_(info),
+ compact_dex_level_(compact_dex_level) {
}
bool OatWriter::AddDexFileSource(const char* filename,
@@ -1181,18 +1185,6 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
quick_code_offset = NewQuickCodeOffset(compiled_method, method_ref, thumb_offset);
deduped = false;
}
-
- if (code_size != 0) {
- if (relative_patcher_->GetOffset(method_ref) != 0u) {
- // TODO: Should this be a hard failure?
- LOG(WARNING) << "Multiple definitions of "
- << method_ref.dex_file->PrettyMethod(method_ref.index)
- << " offsets " << relative_patcher_->GetOffset(method_ref)
- << " " << quick_code_offset;
- } else {
- relative_patcher_->SetOffset(method_ref, quick_code_offset);
- }
- }
} else {
quick_code_offset = dedupe_map_.GetOrCreate(
compiled_method,
@@ -3160,7 +3152,8 @@ bool OatWriter::WriteDexFile(OutputStream* out,
if (!SeekToDexFile(out, file, oat_dex_file)) {
return false;
}
- if (profile_compilation_info_ != nullptr) {
+ if (profile_compilation_info_ != nullptr ||
+ compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
CHECK(!update_input_vdex) << "We should never update the input vdex when doing dexlayout";
if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
return false;
@@ -3259,7 +3252,8 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
}
- dex_file = DexFileLoader::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg);
+ dex_file = DexFileLoader::OpenDex(
+ dup_fd, location, /* verify */ true, /* verify_checksum */ true, &error_msg);
} else {
// The source data is a vdex file.
CHECK(oat_dex_file->source_.IsRawData())
@@ -3286,6 +3280,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
}
Options options;
options.output_to_memmap_ = true;
+ options.compact_dex_level_ = compact_dex_level_;
DexLayout dex_layout(options, profile_compilation_info_, nullptr);
dex_layout.ProcessDexFile(location.c_str(), dex_file.get(), 0);
std::unique_ptr<MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index c742fd4441..6a82fd1d59 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -24,6 +24,7 @@
#include "base/array_ref.h"
#include "base/dchecked_vector.h"
+#include "cdex/compact_dex_level.h"
#include "linker/relative_patcher.h" // For RelativePatcherTargetProvider.
#include "mem_map.h"
#include "method_reference.h"
@@ -114,7 +115,10 @@ class OatWriter {
kDefault = kCreate
};
- OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCompilationInfo* info);
+ OatWriter(bool compiling_boot_image,
+ TimingLogger* timings,
+ ProfileCompilationInfo* info,
+ CompactDexLevel compact_dex_level);
// To produce a valid oat file, the user must first add sources with any combination of
// - AddDexFileSource(),
@@ -491,6 +495,9 @@ class OatWriter {
// Profile info used to generate new layout of files.
ProfileCompilationInfo* profile_compilation_info_;
+ // Compact dex level that is generated.
+ CompactDexLevel compact_dex_level_;
+
using OrderedMethodList = std::vector<OrderedMethodData>;
// List of compiled methods, sorted by the order defined in OrderedMethodData.
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index a19057a0ed..1ee2e4efd0 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -127,7 +127,8 @@ class OatTest : public CommonCompilerTest {
TimingLogger timings("WriteElf", false, false);
OatWriter oat_writer(/*compiling_boot_image*/false,
&timings,
- /*profile_compilation_info*/nullptr);
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone);
for (const DexFile* dex_file : dex_files) {
ArrayRef<const uint8_t> raw_dex_file(
reinterpret_cast<const uint8_t*>(&dex_file->GetHeader()),
@@ -148,7 +149,10 @@ class OatTest : public CommonCompilerTest {
bool verify,
ProfileCompilationInfo* profile_compilation_info) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false, &timings, profile_compilation_info);
+ OatWriter oat_writer(/*compiling_boot_image*/false,
+ &timings,
+ profile_compilation_info,
+ CompactDexLevel::kCompactDexLevelNone);
for (const char* dex_filename : dex_filenames) {
if (!oat_writer.AddDexFileSource(dex_filename, dex_filename)) {
return false;
@@ -166,7 +170,8 @@ class OatTest : public CommonCompilerTest {
TimingLogger timings("WriteElf", false, false);
OatWriter oat_writer(/*compiling_boot_image*/false,
&timings,
- /*profile_compilation_info*/nullptr);
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone);
if (!oat_writer.AddZippedDexFilesSource(std::move(zip_fd), location)) {
return false;
}
@@ -387,7 +392,7 @@ TEST_F(OatTest, WriteRead) {
// TODO: make selectable.
Compiler::Kind compiler_kind = Compiler::kQuick;
- InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
+ InstructionSet insn_set = kIsTargetBuild ? InstructionSet::kThumb2 : InstructionSet::kX86;
std::string error_msg;
SetupCompiler(compiler_kind, insn_set, std::vector<std::string>(), /*out*/ &error_msg);
@@ -486,7 +491,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
}
TEST_F(OatTest, OatHeaderIsValid) {
- InstructionSet insn_set = kX86;
+ InstructionSet insn_set = InstructionSet::kX86;
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
@@ -511,7 +516,7 @@ TEST_F(OatTest, EmptyTextSection) {
// TODO: make selectable.
Compiler::Kind compiler_kind = Compiler::kQuick;
InstructionSet insn_set = kRuntimeISA;
- if (insn_set == kArm) insn_set = kThumb2;
+ if (insn_set == InstructionSet::kArm) insn_set = InstructionSet::kThumb2;
std::string error_msg;
std::vector<std::string> compiler_options;
compiler_options.push_back("--compiler-filter=extract");
@@ -839,7 +844,7 @@ TEST_F(OatTest, ZipFileInputWithEmptyDex) {
}
TEST_F(OatTest, UpdateChecksum) {
- InstructionSet insn_set = kX86;
+ InstructionSet insn_set = InstructionSet::kX86;
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 705043bbeb..4916d643c6 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -29,6 +29,27 @@ art_cc_binary {
],
}
+art_cc_binary {
+ name: "dexdumps",
+ host_supported: true,
+ device_supported: false,
+ srcs: [
+ "dexdump_cfg.cc",
+ "dexdump_main.cc",
+ "dexdump.cc",
+ ],
+ cflags: ["-Wall", "-Werror"],
+ static_libs: [
+ "libart",
+ "libbase",
+ ] + art_static_dependencies,
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
art_cc_test {
name: "art_dexdump_tests",
defaults: [
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 3648a3edd0..4bfd91fdd9 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1883,7 +1883,8 @@ int processFile(const char* fileName) {
const bool kVerifyChecksum = !gOptions.ignoreBadChecksum;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 29c9e92189..fabe6e785c 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -17,6 +17,7 @@ art_cc_defaults {
defaults: ["art_defaults"],
host_supported: true,
srcs: [
+ "compact_dex_writer.cc",
"dexlayout.cc",
"dex_ir.cc",
"dex_ir_builder.cc",
@@ -37,7 +38,10 @@ art_cc_library {
art_cc_library {
name: "libartd-dexlayout",
- defaults: ["libart-dexlayout-defaults"],
+ defaults: [
+ "libart-dexlayout-defaults",
+ "art_debug_defaults",
+ ],
shared_libs: ["libartd"],
}
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
new file mode 100644
index 0000000000..b089c1d4b3
--- /dev/null
+++ b/dexlayout/compact_dex_writer.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compact_dex_writer.h"
+
+#include "cdex/compact_dex_file.h"
+
+namespace art {
+
+void CompactDexWriter::WriteHeader() {
+ CompactDexFile::Header header;
+ CompactDexFile::WriteMagic(&header.magic_[0]);
+ CompactDexFile::WriteCurrentVersion(&header.magic_[0]);
+ header.checksum_ = header_->Checksum();
+ std::copy_n(header_->Signature(), DexFile::kSha1DigestSize, header.signature_);
+ header.file_size_ = header_->FileSize();
+ header.header_size_ = header_->GetSize();
+ header.endian_tag_ = header_->EndianTag();
+ header.link_size_ = header_->LinkSize();
+ header.link_off_ = header_->LinkOffset();
+ const dex_ir::Collections& collections = header_->GetCollections();
+ header.map_off_ = collections.MapListOffset();
+ header.string_ids_size_ = collections.StringIdsSize();
+ header.string_ids_off_ = collections.StringIdsOffset();
+ header.type_ids_size_ = collections.TypeIdsSize();
+ header.type_ids_off_ = collections.TypeIdsOffset();
+ header.proto_ids_size_ = collections.ProtoIdsSize();
+ header.proto_ids_off_ = collections.ProtoIdsOffset();
+ header.field_ids_size_ = collections.FieldIdsSize();
+ header.field_ids_off_ = collections.FieldIdsOffset();
+ header.method_ids_size_ = collections.MethodIdsSize();
+ header.method_ids_off_ = collections.MethodIdsOffset();
+ header.class_defs_size_ = collections.ClassDefsSize();
+ header.class_defs_off_ = collections.ClassDefsOffset();
+ header.data_size_ = header_->DataSize();
+ header.data_off_ = header_->DataOffset();
+ Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u);
+}
+
+} // namespace art
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
new file mode 100644
index 0000000000..1c77202c9a
--- /dev/null
+++ b/dexlayout/compact_dex_writer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of the compact dex writer, which outputs compact dex files from dex IR.
+ */
+
+#ifndef ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
+#define ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
+
+#include "dex_writer.h"
+
+namespace art {
+
+class CompactDexWriter : public DexWriter {
+ public:
+ CompactDexWriter(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level)
+ : DexWriter(header, mem_map),
+ compact_dex_level_(compact_dex_level) { }
+
+ protected:
+ void WriteHeader() OVERRIDE;
+
+ const CompactDexLevel compact_dex_level_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CompactDexWriter);
+};
+
+} // namespace art
+
+#endif // ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index f75eacc2d3..3edb0a44f2 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -167,10 +167,17 @@ static bool GetIdsFromByteCode(Collections& collections,
std::vector<MethodId*>* method_ids,
std::vector<FieldId*>* field_ids) {
bool has_id = false;
- for (const Instruction& instruction : code->Instructions()) {
- CHECK_GT(instruction.SizeInCodeUnits(), 0u);
+ IterationRange<DexInstructionIterator> instructions = code->Instructions();
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for (; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ SafeDexInstructionIterator next = it;
+ ++next;
+ if (next.IsErrorState()) {
+ break;
+ }
has_id |= GetIdFromInstruction(collections,
- &instruction,
+ &it.Inst(),
type_ids,
string_ids,
method_ids,
@@ -403,8 +410,23 @@ EncodedArrayItem* Collections::CreateEncodedArrayItem(const uint8_t* static_data
return encoded_array_item;
}
-AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem* annotation,
- uint32_t offset) {
+void Collections::AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count) {
+ uint32_t current_offset = start_offset;
+ for (size_t i = 0; i < count; ++i) {
+ // Annotation that we didn't process already, add it to the set.
+ const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
+ DCHECK(annotation_item != nullptr);
+ current_offset += annotation_item->GetSize();
+ }
+}
+
+AnnotationItem* Collections::CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation) {
+ const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
+ const uint32_t offset = start_data - dex_file.Begin();
auto found_annotation_item = AnnotationItems().find(offset);
if (found_annotation_item != AnnotationItems().end()) {
return found_annotation_item->second.get();
@@ -413,10 +435,11 @@ AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem*
const uint8_t* annotation_data = annotation->annotation_;
std::unique_ptr<EncodedValue> encoded_value(
ReadEncodedValue(&annotation_data, DexFile::kDexAnnotationAnnotation, 0));
- // TODO: Calculate the size of the annotation.
AnnotationItem* annotation_item =
new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
- annotation_items_.AddItem(annotation_item, offset);
+ annotation_item->SetOffset(offset);
+ annotation_item->SetSize(annotation_data - start_data);
+ annotation_items_.AddItem(annotation_item, annotation_item->GetOffset());
return annotation_item;
}
@@ -437,8 +460,7 @@ AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file,
if (annotation == nullptr) {
continue;
}
- AnnotationItem* annotation_item =
- CreateAnnotationItem(annotation, disk_annotations_item->entries_[i]);
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
items->push_back(annotation_item);
}
AnnotationSetItem* annotation_set_item = new AnnotationSetItem(items);
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index df3484c012..179d3b96e0 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -208,7 +208,8 @@ class Collections {
TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
EncodedArrayItem* CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset);
- AnnotationItem* CreateAnnotationItem(const DexFile::AnnotationItem* annotation, uint32_t offset);
+ AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation);
AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
@@ -216,6 +217,9 @@ class Collections {
CodeItem* CreateCodeItem(
const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+ void AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count);
StringId* GetStringId(uint32_t index) {
CHECK_LT(index, StringIdsSize());
@@ -948,8 +952,8 @@ class CodeItem : public Item {
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
IterationRange<DexInstructionIterator> Instructions() const {
- return MakeIterationRange(DexInstructionIterator(Insns()),
- DexInstructionIterator(Insns() + InsnsSize()));
+ return MakeIterationRange(DexInstructionIterator(Insns(), 0u),
+ DexInstructionIterator(Insns(), InsnsSize()));
}
private:
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 8eb726a64a..bd3e1fa718 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -152,6 +152,7 @@ static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* co
break;
case DexFile::kDexTypeAnnotationItem:
collections->SetAnnotationItemsOffset(item->offset_);
+ collections->AddAnnotationsFromMapListSection(dex_file, item->offset_, item->size_);
break;
case DexFile::kDexTypeEncodedArrayItem:
collections->SetEncodedArrayItemsOffset(item->offset_);
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 11ba2a6357..4895ab6957 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -12,8 +12,6 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
- *
- * Header file of an in-memory representation of DEX files.
*/
#include "dex_writer.h"
@@ -23,7 +21,10 @@
#include <queue>
#include <vector>
+#include "cdex/compact_dex_file.h"
+#include "compact_dex_writer.h"
#include "dex_file_types.h"
+#include "standard_dex_file.h"
#include "utf.h"
namespace art {
@@ -627,37 +628,36 @@ void DexWriter::WriteMapItem() {
}
void DexWriter::WriteHeader() {
- uint32_t buffer[20];
- dex_ir::Collections& collections = header_->GetCollections();
- size_t offset = 0;
- offset += Write(header_->Magic(), 8 * sizeof(uint8_t), offset);
- buffer[0] = header_->Checksum();
- offset += Write(buffer, sizeof(uint32_t), offset);
- offset += Write(header_->Signature(), 20 * sizeof(uint8_t), offset);
- uint32_t file_size = header_->FileSize();
- buffer[0] = file_size;
- buffer[1] = header_->GetSize();
- buffer[2] = header_->EndianTag();
- buffer[3] = header_->LinkSize();
- buffer[4] = header_->LinkOffset();
- buffer[5] = collections.MapListOffset();
- buffer[6] = collections.StringIdsSize();
- buffer[7] = collections.StringIdsOffset();
- buffer[8] = collections.TypeIdsSize();
- buffer[9] = collections.TypeIdsOffset();
- buffer[10] = collections.ProtoIdsSize();
- buffer[11] = collections.ProtoIdsOffset();
- buffer[12] = collections.FieldIdsSize();
- buffer[13] = collections.FieldIdsOffset();
- buffer[14] = collections.MethodIdsSize();
- buffer[15] = collections.MethodIdsOffset();
- uint32_t class_defs_size = collections.ClassDefsSize();
- uint32_t class_defs_off = collections.ClassDefsOffset();
- buffer[16] = class_defs_size;
- buffer[17] = class_defs_off;
- buffer[18] = header_->DataSize();
- buffer[19] = header_->DataOffset();
- Write(buffer, 20 * sizeof(uint32_t), offset);
+ StandardDexFile::Header header;
+ static constexpr size_t kMagicAndVersionLen =
+ StandardDexFile::kDexMagicSize + StandardDexFile::kDexVersionLen;
+ std::copy_n(header_->Magic(), kMagicAndVersionLen, header.magic_);
+ header.checksum_ = header_->Checksum();
+ std::copy_n(header_->Signature(), DexFile::kSha1DigestSize, header.signature_);
+ header.file_size_ = header_->FileSize();
+ header.header_size_ = header_->GetSize();
+ header.endian_tag_ = header_->EndianTag();
+ header.link_size_ = header_->LinkSize();
+ header.link_off_ = header_->LinkOffset();
+ const dex_ir::Collections& collections = header_->GetCollections();
+ header.map_off_ = collections.MapListOffset();
+ header.string_ids_size_ = collections.StringIdsSize();
+ header.string_ids_off_ = collections.StringIdsOffset();
+ header.type_ids_size_ = collections.TypeIdsSize();
+ header.type_ids_off_ = collections.TypeIdsOffset();
+ header.proto_ids_size_ = collections.ProtoIdsSize();
+ header.proto_ids_off_ = collections.ProtoIdsOffset();
+ header.field_ids_size_ = collections.FieldIdsSize();
+ header.field_ids_off_ = collections.FieldIdsOffset();
+ header.method_ids_size_ = collections.MethodIdsSize();
+ header.method_ids_off_ = collections.MethodIdsOffset();
+ header.class_defs_size_ = collections.ClassDefsSize();
+ header.class_defs_off_ = collections.ClassDefsOffset();
+ header.data_size_ = header_->DataSize();
+ header.data_off_ = header_->DataOffset();
+
+ static_assert(sizeof(header) == 0x70, "Size doesn't match dex spec");
+ Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u);
}
void DexWriter::WriteMemMap() {
@@ -681,9 +681,14 @@ void DexWriter::WriteMemMap() {
WriteHeader();
}
-void DexWriter::Output(dex_ir::Header* header, MemMap* mem_map) {
- DexWriter dex_writer(header, mem_map);
- dex_writer.WriteMemMap();
+void DexWriter::Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level) {
+ std::unique_ptr<DexWriter> writer;
+ if (compact_dex_level != CompactDexLevel::kCompactDexLevelNone) {
+ writer.reset(new CompactDexWriter(header, mem_map, compact_dex_level));
+ } else {
+ writer.reset(new DexWriter(header, mem_map));
+ }
+ writer->WriteMemMap();
}
} // namespace art
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index b396adf126..85d3e7ebf3 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -20,6 +20,7 @@
#define ART_DEXLAYOUT_DEX_WRITER_H_
#include "base/unix_file/fd_file.h"
+#include "cdex/compact_dex_level.h"
#include "dex_ir.h"
#include "mem_map.h"
#include "os.h"
@@ -28,11 +29,13 @@ namespace art {
class DexWriter {
public:
- DexWriter(dex_ir::Header* header, MemMap* mem_map) : header_(header), mem_map_(mem_map) { }
+ DexWriter(dex_ir::Header* header, MemMap* mem_map) : header_(header), mem_map_(mem_map) {}
- static void Output(dex_ir::Header* header, MemMap* mem_map);
+ static void Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level);
- private:
+ virtual ~DexWriter() {}
+
+ protected:
void WriteMemMap();
size_t Write(const void* buffer, size_t length, size_t offset);
@@ -62,11 +65,12 @@ class DexWriter {
void WriteCallSites();
void WriteMethodHandles();
void WriteMapItem();
- void WriteHeader();
+ virtual void WriteHeader();
dex_ir::Header* const header_;
MemMap* const mem_map_;
+ private:
DISALLOW_COPY_AND_ASSIGN(DexWriter);
};
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 6fcd6ffe9f..9927576400 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -19,6 +19,7 @@
#include "common_runtime_test.h"
+#include "base/file_utils.h"
#include "exec_utils.h"
#include "oat_file.h"
#include "os.h"
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 40449ae8bd..dd2e809a92 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -52,6 +52,11 @@ namespace art {
using android::base::StringPrintf;
+// Setting this to false disables class def layout entirely, which is stronger than strictly
+// necessary to ensure the partial order w.r.t. class derivation. TODO: Re-enable (b/68317550).
+static constexpr bool kChangeClassDefOrder = false;
+
+static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
static constexpr uint32_t kDexCodeItemAlignment = 4;
/*
@@ -1049,15 +1054,13 @@ void DexLayout::DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32
code_offset, code_offset, dot.c_str(), name, type_descriptor.c_str());
// Iterate over all instructions.
- IterationRange<DexInstructionIterator> instructions = code->Instructions();
- for (auto inst = instructions.begin(); inst != instructions.end(); ++inst) {
- const uint32_t dex_pc = inst.GetDexPC(instructions.begin());
+ for (const DexInstructionPcPair& inst : code->Instructions()) {
const uint32_t insn_width = inst->SizeInCodeUnits();
if (insn_width == 0) {
- fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", dex_pc);
+ fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", inst.DexPc());
break;
}
- DumpInstruction(code, code_offset, dex_pc, insn_width, &*inst);
+ DumpInstruction(code, code_offset, inst.DexPc(), insn_width, &inst.Inst());
} // for
}
@@ -1581,9 +1584,13 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex
std::vector<dex_ir::ClassData*> new_class_data_order;
for (uint32_t i = 0; i < new_class_def_order.size(); ++i) {
dex_ir::ClassDef* class_def = new_class_def_order[i];
- class_def->SetIndex(i);
- class_def->SetOffset(class_defs_offset);
- class_defs_offset += dex_ir::ClassDef::ItemSize();
+ if (kChangeClassDefOrder) {
+ // This produces dex files that violate the spec since the super class class_def is supposed
+ // to occur before any subclasses.
+ class_def->SetIndex(i);
+ class_def->SetOffset(class_defs_offset);
+ class_defs_offset += dex_ir::ClassDef::ItemSize();
+ }
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
class_data->SetOffset(class_data_offset);
@@ -1595,7 +1602,7 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex
return new_class_data_order;
}
-void DexLayout::LayoutStringData(const DexFile* dex_file) {
+int32_t DexLayout::LayoutStringData(const DexFile* dex_file) {
const size_t num_strings = header_->GetCollections().StringIds().size();
std::vector<bool> is_shorty(num_strings, false);
std::vector<bool> from_hot_method(num_strings, false);
@@ -1708,13 +1715,11 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
offset += data->GetSize() + 1; // Add one extra for null.
}
if (offset > max_offset) {
- const uint32_t diff = offset - max_offset;
+ return offset - max_offset;
// If we expanded the string data section, we need to update the offsets or else we will
// corrupt the next section when writing out.
- FixupSections(header_->GetCollections().StringDatasOffset(), diff);
- // Update file size.
- header_->SetFileSize(header_->FileSize() + diff);
}
+ return 0;
}
// Orders code items according to specified class data ordering.
@@ -1794,6 +1799,10 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
}
}
+ // Removing duplicate CodeItems may expose other issues with downstream
+ // optimizations such as quickening. But we need to ensure at least the weak
+ // forms of it currently in use do not break layout optimizations.
+ std::map<dex_ir::CodeItem*, uint32_t> original_code_item_offset;
// Total_diff includes diffs generated by clinits, executed, and non-executed methods.
int32_t total_diff = 0;
// The relative placement has no effect on correctness; it is used to ensure
@@ -1812,11 +1821,22 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
dex_ir::CodeItem* code_item = method->GetCodeItem();
if (code_item != nullptr &&
code_items_set.find(code_item) != code_items_set.end()) {
- diff += UnsignedLeb128Size(code_item_offset)
- - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(code_item_offset);
- code_item_offset +=
- RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ // Compute where the CodeItem was originally laid out.
+ uint32_t original_offset = code_item->GetOffset();
+ auto it = original_code_item_offset.find(code_item);
+ if (it != original_code_item_offset.end()) {
+ original_offset = it->second;
+ } else {
+ original_code_item_offset[code_item] = code_item->GetOffset();
+ // Assign the new offset and move the pointer to allocate space.
+ code_item->SetOffset(code_item_offset);
+ code_item_offset +=
+ RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+ // Update the size of the encoded methods to reflect that the offset difference
+ // may have changed the ULEB128 length.
+ diff +=
+ UnsignedLeb128Size(code_item->GetOffset()) - UnsignedLeb128Size(original_offset);
}
}
}
@@ -1939,13 +1959,23 @@ void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
- LayoutStringData(dex_file);
+ const int32_t string_diff = LayoutStringData(dex_file);
+ // If we expanded the string data section, we need to update the offsets or else we will
+ // corrupt the next section when writing out.
+ FixupSections(header_->GetCollections().StringDatasOffset(), string_diff);
+ // Update file size.
+ header_->SetFileSize(header_->FileSize() + string_diff);
+
std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
- int32_t diff = LayoutCodeItems(dex_file, new_class_data_order);
+ const int32_t code_item_diff = LayoutCodeItems(dex_file, new_class_data_order);
// Move sections after ClassData by diff bytes.
- FixupSections(header_->GetCollections().ClassDatasOffset(), diff);
- // Update file size.
- header_->SetFileSize(header_->FileSize() + diff);
+ FixupSections(header_->GetCollections().ClassDatasOffset(), code_item_diff);
+
+ // Update file and data size.
+ // The data size must be aligned to kDataSectionAlignment.
+ const int32_t total_diff = code_item_diff + string_diff;
+ header_->SetDataSize(RoundUp(header_->DataSize() + total_diff, kDataSectionAlignment));
+ header_->SetFileSize(header_->FileSize() + total_diff);
}
void DexLayout::OutputDexFile(const DexFile* dex_file) {
@@ -1986,29 +2016,10 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) {
}
return;
}
- DexWriter::Output(header_, mem_map_.get());
+ DexWriter::Output(header_, mem_map_.get(), options_.compact_dex_level_);
if (new_file != nullptr) {
UNUSED(new_file->FlushCloseOrErase());
}
- // Verify the output dex file's structure for debug builds.
- if (kIsDebugBuild) {
- std::string location = "memory mapped file for " + dex_file_location;
- std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
- mem_map_->Size(),
- location,
- header_->Checksum(),
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg));
- DCHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
- }
- // Do IR-level comparison between input and output. This check ignores potential differences
- // due to layout, so offsets are not checked. Instead, it checks the data contents of each item.
- if (kIsDebugBuild || options_.verify_output_) {
- std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file));
- CHECK(VerifyOutputDexFile(orig_header.get(), header_, &error_msg)) << error_msg;
- }
}
/*
@@ -2040,12 +2051,38 @@ void DexLayout::ProcessDexFile(const char* file_name,
DumpDexFile();
}
- // Output dex file as file or memmap.
+ // In case we are outputting to a file, keep it open so we can verify.
if (options_.output_dex_directory_ != nullptr || options_.output_to_memmap_) {
if (info_ != nullptr) {
LayoutOutputFile(dex_file);
}
OutputDexFile(dex_file);
+
+ // Clear header before verifying to reduce peak RAM usage.
+ header.reset();
+
+ // Verify the output dex file's structure, only enabled by default for debug builds.
+ if (options_.verify_output_) {
+ std::string error_msg;
+ std::string location = "memory mapped file for " + std::string(file_name);
+ std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
+ mem_map_->Size(),
+ location,
+ /* checksum */ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg));
+ CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
+
+ // Do IR-level comparison between input and output. This check ignores potential differences
+ // due to layout, so offsets are not checked. Instead, it checks the data contents of each item.
+ //
+ // Regenerate output IR to catch any bugs that might happen during writing.
+ std::unique_ptr<dex_ir::Header> output_header(dex_ir::DexIrBuilder(*output_dex_file));
+ std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file));
+ CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), &error_msg)) << error_msg;
+ }
}
}
@@ -2062,7 +2099,8 @@ int DexLayout::ProcessFile(const char* file_name) {
const bool verify_checksum = !options_.ignore_bad_checksum_;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 180d9bc87c..2e897739cc 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -26,6 +26,7 @@
#include <stdint.h>
#include <stdio.h>
+#include "cdex/compact_dex_level.h"
#include "dex_file_layout.h"
#include "dex_ir.h"
#include "mem_map.h"
@@ -59,9 +60,9 @@ class Options {
bool show_section_headers_ = false;
bool show_section_statistics_ = false;
bool verbose_ = false;
- // TODO: Set verify_output_ back to false by default. Was set to true for debugging b/62840842.
- bool verify_output_ = true;
+ bool verify_output_ = kIsDebugBuild;
bool visualize_pattern_ = false;
+ CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
OutputFormat output_format_ = kOutputPlain;
const char* output_dex_directory_ = nullptr;
const char* output_file_name_ = nullptr;
@@ -122,7 +123,7 @@ class DexLayout {
std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
int32_t LayoutCodeItems(const DexFile* dex_file,
std::vector<dex_ir::ClassData*> new_class_data_order);
- void LayoutStringData(const DexFile* dex_file);
+ int32_t LayoutStringData(const DexFile* dex_file);
bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
void FixupSections(uint32_t offset, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index f8fa893069..08673056d9 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -317,6 +317,30 @@ class DexLayoutTest : public CommonRuntimeTest {
return true;
}
+ template <typename Mutator>
+ bool MutateDexFile(File* output_dex, const std::string& input_jar, const Mutator& mutator) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ CHECK(DexFileLoader::Open(input_jar.c_str(),
+ input_jar.c_str(),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg,
+ &dex_files)) << error_msg;
+ EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ CHECK(dex->EnableWrite()) << "Failed to enable write";
+ mutator(const_cast<DexFile*>(dex.get()));
+ if (!output_dex->WriteFully(dex->Begin(), dex->Size())) {
+ return false;
+ }
+ }
+ if (output_dex->Flush() != 0) {
+ PLOG(FATAL) << "Could not flush the output file.";
+ }
+ return true;
+ }
+
// Create a profile with some subset of methods and classes.
void CreateProfile(const std::string& input_dex,
const std::string& out_profile,
@@ -325,7 +349,8 @@ class DexLayoutTest : public CommonRuntimeTest {
std::string error_msg;
bool result = DexFileLoader::Open(input_dex.c_str(),
input_dex,
- false,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
&error_msg,
&dex_files);
@@ -517,8 +542,10 @@ class DexLayoutTest : public CommonRuntimeTest {
const char* dex_filename,
ScratchFile* profile_file,
std::vector<std::string>& dexlayout_exec_argv) {
- WriteBase64ToFile(dex_filename, dex_file->GetFile());
- EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ if (dex_filename != nullptr) {
+ WriteBase64ToFile(dex_filename, dex_file->GetFile());
+ EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
+ }
if (profile_file != nullptr) {
CreateProfile(dex_file->GetFilename(), profile_file->GetFilename(), dex_file->GetFilename());
}
@@ -672,4 +699,58 @@ TEST_F(DexLayoutTest, DuplicateCodeItem) {
dexlayout_exec_argv));
}
+// Test that instructions that go past the end of the code items don't cause crashes.
+TEST_F(DexLayoutTest, CodeItemOverrun) {
+ ScratchFile temp_dex;
+ MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [] (DexFile* dex) {
+ bool mutated_successfully = false;
+ // Change the dex instructions to make an opcode that spans past the end of the code item.
+ for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& def = dex->GetClassDef(i);
+ const uint8_t* data = dex->GetClassData(def);
+ if (data == nullptr) {
+ continue;
+ }
+ ClassDataItemIterator it(*dex, data);
+ it.SkipAllFields();
+ while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
+ DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
+ if (item != nullptr) {
+ IterationRange<DexInstructionIterator> instructions = item->Instructions();
+ if (instructions.begin() != instructions.end()) {
+ DexInstructionIterator last_instruction = instructions.begin();
+ for (auto dex_it = instructions.begin(); dex_it != instructions.end(); ++dex_it) {
+ last_instruction = dex_it;
+ }
+ if (last_instruction->SizeInCodeUnits() == 1) {
+ // Set the opcode to something that will go past the end of the code item.
+ const_cast<Instruction&>(last_instruction.Inst()).SetOpcode(
+ Instruction::CONST_STRING_JUMBO);
+ mutated_successfully = true;
+ // Test that the safe iterator doesn't go past the end.
+ SafeDexInstructionIterator it2(instructions.begin(), instructions.end());
+ while (!it2.IsErrorState()) {
+ ++it2;
+ }
+ EXPECT_TRUE(it2 == last_instruction);
+ EXPECT_TRUE(it2 < instructions.end());
+ }
+ }
+ }
+ it.Next();
+ }
+ }
+ CHECK(mutated_successfully)
+ << "Failed to find candidate code item with only one code unit in last instruction.";
+ });
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ /*dex_filename*/ nullptr,
+ nullptr /* profile_file */,
+ dexlayout_exec_argv));
+}
+
} // namespace art
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index e5870522a3..c8bc132da0 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -179,7 +179,8 @@ static int processFile(const char* fileName) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
fputs(error_msg.c_str(), stderr);
fputc('\n', stderr);
return -1;
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 08d38d5925..39c9b9993b 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -18,7 +18,9 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "base/file_utils.h"
#include "compiler_filter.h"
+#include "class_loader_context.h"
#include "dex_file.h"
#include "noop_compiler_callbacks.h"
#include "oat_file_assistant.h"
@@ -101,6 +103,8 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError("");
UsageError(" --vdex-fd=number: file descriptor of the vdex file corresponding to the oat file");
UsageError("");
+ UsageError(" --zip-fd=number: specifies a file descriptor corresponding to the dex file.");
+ UsageError("");
UsageError(" --downgrade: optional, if the purpose of dexopt is to downgrade the dex file");
UsageError(" By default, dexopt considers upgrade case.");
UsageError("");
@@ -159,7 +163,7 @@ class DexoptAnalyzer FINAL {
} else if (option.starts_with("--isa=")) {
std::string isa_str = option.substr(strlen("--isa=")).ToString();
isa_ = GetInstructionSetFromString(isa_str.c_str());
- if (isa_ == kNone) {
+ if (isa_ == InstructionSet::kNone) {
Usage("Invalid isa '%s'", option.data());
}
} else if (option.starts_with("--image=")) {
@@ -173,9 +177,28 @@ class DexoptAnalyzer FINAL {
downgrade_ = true;
} else if (option.starts_with("--oat-fd")) {
oat_fd_ = std::stoi(option.substr(strlen("--oat-fd=")).ToString(), nullptr, 0);
+ if (oat_fd_ < 0) {
+ Usage("Invalid --oat-fd %d", oat_fd_);
+ }
} else if (option.starts_with("--vdex-fd")) {
vdex_fd_ = std::stoi(option.substr(strlen("--vdex-fd=")).ToString(), nullptr, 0);
- } else { Usage("Unknown argument '%s'", option.data()); }
+ if (vdex_fd_ < 0) {
+ Usage("Invalid --vdex-fd %d", vdex_fd_);
+ }
+ } else if (option.starts_with("--zip-fd")) {
+ zip_fd_ = std::stoi(option.substr(strlen("--zip-fd=")).ToString(), nullptr, 0);
+ if (zip_fd_ < 0) {
+ Usage("Invalid --zip-fd %d", zip_fd_);
+ }
+ } else if (option.starts_with("--class-loader-context=")) {
+ std::string context_str = option.substr(strlen("--class-loader-context=")).ToString();
+ class_loader_context_ = ClassLoaderContext::Create(context_str);
+ if (class_loader_context_ == nullptr) {
+ Usage("Invalid --class-loader-context '%s'", context_str.c_str());
+ }
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
}
if (image_.empty()) {
@@ -189,12 +212,6 @@ class DexoptAnalyzer FINAL {
Usage("--image unspecified and ANDROID_ROOT not set or image file does not exist.");
}
}
- if (oat_fd_ > 0 && vdex_fd_ < 0) {
- Usage("A valid --vdex-fd must also be provided with --oat-fd.");
- }
- if (oat_fd_ < 0 && vdex_fd_ > 0) {
- Usage("A valid --oat-fd must also be provided with --vdex-fd.");
- }
}
bool CreateRuntime() {
@@ -238,26 +255,20 @@ class DexoptAnalyzer FINAL {
std::unique_ptr<Runtime> runtime(Runtime::Current());
std::unique_ptr<OatFileAssistant> oat_file_assistant;
- if (oat_fd_ != -1 && vdex_fd_ != -1) {
- oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
- isa_,
- false /*load_executable*/,
- vdex_fd_,
- oat_fd_);
- } else {
- oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
- isa_,
- false /*load_executable*/);
- }
+ oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
+ isa_,
+ false /*load_executable*/,
+ vdex_fd_,
+ oat_fd_,
+ zip_fd_);
// Always treat elements of the bootclasspath as up-to-date.
// TODO(calin): this check should be in OatFileAssistant.
if (oat_file_assistant->IsInBootClassPath()) {
return kNoDexOptNeeded;
}
- // TODO(calin): Pass the class loader context as an argument to dexoptanalyzer. b/62269291.
int dexoptNeeded = oat_file_assistant->GetDexOptNeeded(
- compiler_filter_, assume_profile_changed_, downgrade_);
+ compiler_filter_, assume_profile_changed_, downgrade_, class_loader_context_.get());
// Convert OatFileAssitant codes to dexoptanalyzer codes.
switch (dexoptNeeded) {
@@ -280,11 +291,14 @@ class DexoptAnalyzer FINAL {
std::string dex_file_;
InstructionSet isa_;
CompilerFilter::Filter compiler_filter_;
+ std::unique_ptr<ClassLoaderContext> class_loader_context_;
bool assume_profile_changed_;
bool downgrade_;
std::string image_;
int oat_fd_ = -1;
int vdex_fd_ = -1;
+ // File descriptor corresponding to apk, dex_file, or zip.
+ int zip_fd_ = -1;
};
static int dexoptAnalyze(int argc, char** argv) {
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 5af51c1355..2ed41c860a 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -36,17 +36,17 @@ Disassembler::Disassembler(DisassemblerOptions* disassembler_options)
}
Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
- if (instruction_set == kArm || instruction_set == kThumb2) {
+ if (instruction_set == InstructionSet::kArm || instruction_set == InstructionSet::kThumb2) {
return new arm::DisassemblerArm(options);
- } else if (instruction_set == kArm64) {
+ } else if (instruction_set == InstructionSet::kArm64) {
return new arm64::DisassemblerArm64(options);
- } else if (instruction_set == kMips) {
+ } else if (instruction_set == InstructionSet::kMips) {
return new mips::DisassemblerMips(options, /* is_o32_abi */ true);
- } else if (instruction_set == kMips64) {
+ } else if (instruction_set == InstructionSet::kMips64) {
return new mips::DisassemblerMips(options, /* is_o32_abi */ false);
- } else if (instruction_set == kX86) {
+ } else if (instruction_set == InstructionSet::kX86) {
return new x86::DisassemblerX86(options, false);
- } else if (instruction_set == kX86_64) {
+ } else if (instruction_set == InstructionSet::kX86_64) {
return new x86::DisassemblerX86(options, true);
} else {
UNIMPLEMENTED(FATAL) << static_cast<uint32_t>(instruction_set);
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 938ea5dc2f..7c6a3251f7 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -479,15 +479,23 @@ static const MipsInstruction gMipsInstructions[] = {
{ kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x9, "srli", "kmW" },
{ kMsaMask | (0x3ff << 16), kMsa | (0xbe << 16) | 0x19, "move.v", "km" },
{ kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x19, "splati", "kX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x19, "copy_s", "yX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x19, "copy_u", "yX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x19, "insert", "YD" },
{ kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" },
{ kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x7, "ldi", "kx" },
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x14, "ilvl", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x14, "ilvr", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x14, "ilvev", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x14, "ilvod", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x12, "maddv", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x12, "msubv", "Vkmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x1b, "fmadd", "Ukmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x5 << 22) | 0x1b, "fmsub", "Ukmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x15, "hadd_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x15, "hadd_u", "Vkmn" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -760,6 +768,31 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
args << i10;
break;
}
+ case 'Y': // MSA df/n - wd[x].
+ {
+ int32_t df_n = (instruction >> 16) & 0x3f;
+ if ((df_n & (0x3 << 4)) == 0) {
+ opcode += ".b";
+ args << 'w' << sa << '[' << (df_n & 0xf) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 3)) == 0) {
+ opcode += ".h";
+ args << 'w' << sa << '[' << (df_n & 0x7) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 2)) == 0) {
+ opcode += ".w";
+ args << 'w' << sa << '[' << (df_n & 0x3) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 1)) == 0) {
+ opcode += ".d";
+ args << 'w' << sa << '[' << (df_n & 0x1) << ']';
+ }
+ break;
+ }
+ case 'y': args << RegName(sa); break;
}
if (*(args_fmt + 1)) {
args << ", ";
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 84c70bba80..4ab7dcfbb9 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -169,7 +169,7 @@ class OatSymbolizer FINAL {
bss->WriteNoBitsSection(oat_file_->BssSize());
}
- if (isa == kMips || isa == kMips64) {
+ if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) {
builder_->WriteMIPSabiflagsSection();
}
builder_->PrepareDynamicSection(elf_file->GetPath(),
@@ -917,7 +917,7 @@ class OatDumper {
void AddOffsets(const OatFile::OatMethod& oat_method) {
uint32_t code_offset = oat_method.GetCodeOffset();
- if (oat_file_.GetOatHeader().GetInstructionSet() == kThumb2) {
+ if (oat_file_.GetOatHeader().GetInstructionSet() == InstructionSet::kThumb2) {
code_offset &= ~0x1;
}
offsets_.insert(code_offset);
@@ -1001,16 +1001,16 @@ class OatDumper {
dex_code_bytes_ += code_item->insns_size_in_code_units_ * sizeof(code_ptr[0]);
}
- for (const Instruction& inst : code_item->Instructions()) {
- switch (inst.Opcode()) {
+ for (const DexInstructionPcPair& inst : code_item->Instructions()) {
+ switch (inst->Opcode()) {
case Instruction::CONST_STRING: {
- const dex::StringIndex string_index(inst.VRegB_21c());
+ const dex::StringIndex string_index(inst->VRegB_21c());
unique_string_ids_from_code_.insert(StringReference(&dex_file, string_index));
++num_string_ids_from_code_;
break;
}
case Instruction::CONST_STRING_JUMBO: {
- const dex::StringIndex string_index(inst.VRegB_31c());
+ const dex::StringIndex string_index(inst->VRegB_31c());
unique_string_ids_from_code_.insert(StringReference(&dex_file, string_index));
++num_string_ids_from_code_;
break;
@@ -1625,11 +1625,9 @@ class OatDumper {
void DumpDexCode(std::ostream& os, const DexFile& dex_file, const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
- IterationRange<DexInstructionIterator> instructions = code_item->Instructions();
- for (auto it = instructions.begin(); it != instructions.end(); ++it) {
- const size_t dex_pc = it.GetDexPC(instructions.begin());
- os << StringPrintf("0x%04zx: ", dex_pc) << it->DumpHexLE(5)
- << StringPrintf("\t| %s\n", it->DumpString(&dex_file).c_str());
+ for (const DexInstructionPcPair& inst : code_item->Instructions()) {
+ os << StringPrintf("0x%04x: ", inst.DexPc()) << inst->DumpHexLE(5)
+ << StringPrintf("\t| %s\n", inst->DumpString(&dex_file).c_str());
}
}
}
@@ -2232,7 +2230,7 @@ class ImageDumper {
os << StringPrintf("null %s\n", PrettyDescriptor(field->GetTypeDescriptor()).c_str());
} else {
// Grab the field type without causing resolution.
- ObjPtr<mirror::Class> field_type = field->GetType<false>();
+ ObjPtr<mirror::Class> field_type = field->LookupType();
if (field_type != nullptr) {
PrettyObjectValue(os, field_type, value);
} else {
@@ -2270,7 +2268,7 @@ class ImageDumper {
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
quick_code = oat_dumper_->GetQuickOatCode(m);
}
- if (oat_dumper_->GetInstructionSet() == kThumb2) {
+ if (oat_dumper_->GetInstructionSet() == InstructionSet::kThumb2) {
quick_code = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(quick_code) & ~0x1);
}
return quick_code;
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 52fe973c1b..d0f05d9e66 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -24,6 +24,7 @@
#include "android-base/strings.h"
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "exec_utils.h"
diff --git a/openjdkjvm/Android.bp b/openjdkjvm/Android.bp
index 761df02553..a17899358c 100644
--- a/openjdkjvm/Android.bp
+++ b/openjdkjvm/Android.bp
@@ -20,7 +20,9 @@ cc_defaults {
srcs: ["OpenjdkJvm.cc"],
shared_libs: [
"libbase",
- "libnativehelper",
+ ],
+ header_libs: [
+ "libnativehelper_header_only",
],
}
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index b212ea1c20..29ebefddea 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -53,8 +53,8 @@
#include "mirror/string-inl.h"
#include "monitor.h"
#include "native/scoped_fast_native_object_access-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 84a90d65fd..c6090ef9fc 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -24,6 +24,7 @@ cc_defaults {
defaults: ["art_defaults"],
host_supported: true,
srcs: [
+ "deopt_manager.cc",
"events.cc",
"fixed_up_dex_file.cc",
"object_tagging.cc",
@@ -50,10 +51,12 @@ cc_defaults {
"ti_timers.cc",
"transform.cc",
],
- header_libs: ["libopenjdkjvmti_headers"],
+ header_libs: [
+ "libnativehelper_header_only",
+ "libopenjdkjvmti_headers",
+ ],
shared_libs: [
"libbase",
- "libnativehelper",
],
}
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index b30d45ae88..5f726b16e0 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -71,6 +71,7 @@
namespace openjdkjvmti {
EventHandler gEventHandler;
+DeoptManager gDeoptManager;
#define ENSURE_NON_NULL(n) \
do { \
@@ -1676,7 +1677,8 @@ extern const jvmtiInterface_1 gJvmtiInterface;
ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
: art_vm(runtime),
local_data(nullptr),
- capabilities() {
+ capabilities(),
+ event_info_mutex_("jvmtiEnv_EventInfoMutex") {
object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
functions = &gJvmtiInterface;
}
@@ -1710,6 +1712,7 @@ static jint GetEnvHandler(art::JavaVMExt* vm, /*out*/void** env, jint version) {
extern "C" bool ArtPlugin_Initialize() {
art::Runtime* runtime = art::Runtime::Current();
+ gDeoptManager.Setup();
if (runtime->IsStarted()) {
PhaseUtil::SetToLive();
} else {
@@ -1730,6 +1733,7 @@ extern "C" bool ArtPlugin_Initialize() {
extern "C" bool ArtPlugin_Deinitialize() {
gEventHandler.Shutdown();
+ gDeoptManager.Shutdown();
PhaseUtil::Unregister();
ThreadUtil::Unregister();
ClassUtil::Unregister();
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index ad405e8571..126346088c 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -39,10 +39,12 @@
#include <jni.h>
+#include "deopt_manager.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/strlcpy.h"
+#include "base/mutex.h"
#include "events.h"
#include "java_vm_ext.h"
#include "jni_env_ext.h"
@@ -77,12 +79,15 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// or by putting a list in the ClassExt of a field's DeclaringClass.
// TODO Maybe just have an extension to let one put a watch on every field, that would probably be
// good enough maybe since you probably want either a few or all/almost all of them.
- std::unordered_set<art::ArtField*> access_watched_fields;
- std::unordered_set<art::ArtField*> modify_watched_fields;
+ std::unordered_set<art::ArtField*> access_watched_fields GUARDED_BY(event_info_mutex_);
+ std::unordered_set<art::ArtField*> modify_watched_fields GUARDED_BY(event_info_mutex_);
// Set of breakpoints is unique to each jvmtiEnv.
- std::unordered_set<Breakpoint> breakpoints;
- std::unordered_set<const art::ShadowFrame*> notify_frames;
+ std::unordered_set<Breakpoint> breakpoints GUARDED_BY(event_info_mutex_);
+ std::unordered_set<const art::ShadowFrame*> notify_frames GUARDED_BY(event_info_mutex_);
+
+ // RW lock to protect access to all of the event data.
+ art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
new file mode 100644
index 0000000000..f843054681
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.cc
@@ -0,0 +1,322 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <functional>
+
+#include "deopt_manager.h"
+
+#include "art_jvmti.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/mutex-inl.h"
+#include "dex_file_annotations.h"
+#include "events-inl.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "modifiers.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+#include "ti_phase.h"
+
+namespace openjdkjvmti {
+
+// TODO We should make this much more selective in the future so we only return true when we
+// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
+// we can just assume that we care if we are loaded at all.
+//
+// Even if we don't keep track of this at the method level we might want to keep track of it at the
+// level of enabled capabilities.
+bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(
+ art::ArtMethod* method ATTRIBUTE_UNUSED) {
+ return true;
+}
+
+bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
+ return !manager_->MethodHasBreakpoints(method);
+}
+
+DeoptManager::DeoptManager()
+ : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock"),
+ deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
+ performing_deoptimization_(false),
+ global_deopt_count_(0),
+ deopter_count_(0),
+ inspection_callback_(this) { }
+
+void DeoptManager::Setup() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->AddMethodInspectionCallback(&inspection_callback_);
+}
+
+void DeoptManager::Shutdown() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("remove method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
+}
+
+bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
+ art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
+ return MethodHasBreakpointsLocked(method);
+}
+
+bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
+ if (deopter_count_ == 0) {
+ return false;
+ }
+ auto elem = breakpoint_status_.find(method);
+ return elem != breakpoint_status_.end() && elem->second != 0;
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ RemoveDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ AddDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable());
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+
+ if (MethodHasBreakpointsLocked(method)) {
+ // Don't need to do anything extra.
+ breakpoint_status_[method]++;
+ // Another thread might be deoptimizing the very method we just added new breakpoints for. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ return;
+ }
+ breakpoint_status_[method] = 1;
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ if (instrumentation->IsForcedInterpretOnly()) {
+ // We are already interpreting everything so no need to do anything.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (is_default) {
+ AddDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedDeoptimization(self, method);
+ }
+}
+
+void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable()) << method->PrettyMethod();
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might
+ // need but since that is very heavy we will instead just use a condition variable to make sure we
+ // don't race with ourselves.
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+ DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
+ << "breakpoints present!";
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ breakpoint_status_[method] -= 1;
+ if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
+ // We don't need to do anything since we are interpreting everything anyway.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (breakpoint_status_[method] == 0) {
+ if (UNLIKELY(is_default)) {
+ RemoveDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedUndeoptimization(self, method);
+ }
+ } else {
+ // Another thread might be deoptimizing the very methods we just removed breakpoints from. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) {
+ while (performing_deoptimization_) {
+ deoptimization_condition_.Wait(self);
+ }
+}
+
+void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) {
+ WaitForDeoptimizationToFinishLocked(self);
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+}
+
+class ScopedDeoptimizationContext : public art::ValueObject {
+ public:
+ ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
+ RELEASE(deopt->deoptimization_status_lock_)
+ ACQUIRE(art::Locks::mutator_lock_)
+ ACQUIRE(art::Roles::uninterruptible_)
+ : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) {
+ deopt_->WaitForDeoptimizationToFinishLocked(self_);
+ DCHECK(!deopt->performing_deoptimization_)
+ << "Already performing deoptimization on another thread!";
+ // Use performing_deoptimization_ to keep track of the lock.
+ deopt_->performing_deoptimization_ = true;
+ deopt_->deoptimization_status_lock_.Unlock(self_);
+    art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
+ /*long_suspend*/ false);
+ uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods");
+ }
+
+ ~ScopedDeoptimizationContext()
+ RELEASE(art::Locks::mutator_lock_)
+ RELEASE(art::Roles::uninterruptible_) {
+ // Can be suspended again.
+ self_->EndAssertNoThreadSuspension(uninterruptible_cause_);
+ // Release the mutator lock.
+ art::Runtime::Current()->GetThreadList()->ResumeAll();
+ // Let other threads know it's fine to proceed.
+ art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
+ deopt_->performing_deoptimization_ = false;
+ deopt_->deoptimization_condition_.Broadcast(self_);
+ }
+
+ private:
+ art::Thread* self_;
+ DeoptManager* deopt_;
+ const char* uninterruptible_cause_;
+};
+
+void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
+ global_deopt_count_++;
+ if (global_deopt_count_ == 1) {
+ PerformGlobalDeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
+  DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
+ global_deopt_count_--;
+ if (global_deopt_count_ == 0) {
+ PerformGlobalUndeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Deoptimize(method);
+}
+
+void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
+}
+
+void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+
+void DeoptManager::RemoveDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present";
+ deopter_count_--;
+ if (deopter_count_ == 0) {
+ ScopedDeoptimizationContext sdc(self, this);
+ // TODO Give this a real key.
+ art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization("");
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::AddDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange stsc(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ deopter_count_++;
+ if (deopter_count_ == 1) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->EnableDeoptimization();
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::DeoptimizeThread(art::Thread* target) {
+ art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
+}
+
+extern DeoptManager gDeoptManager;
+DeoptManager* DeoptManager::Get() {
+ return &gDeoptManager;
+}
+
+} // namespace openjdkjvmti
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
new file mode 100644
index 0000000000..b265fa8ec2
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.h
@@ -0,0 +1,168 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+#define ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+
+#include <unordered_map>
+
+#include "jni.h"
+#include "jvmti.h"
+
+#include "base/mutex.h"
+#include "runtime_callbacks.h"
+#include "ti_breakpoint.h"
+
+namespace art {
+class ArtMethod;
+namespace mirror {
+class Class;
+} // namespace mirror
+} // namespace art
+
+namespace openjdkjvmti {
+
+class DeoptManager;
+
+struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
+ public:
+ explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
+
+ bool IsMethodBeingInspected(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ bool IsMethodSafeToJit(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ DeoptManager* manager_;
+};
+
+class ScopedDeoptimizationContext;
+
+class DeoptManager {
+ public:
+ DeoptManager();
+
+ void Setup();
+ void Shutdown();
+
+ void RemoveDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ void AddDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ bool MethodHasBreakpoints(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_);
+
+ void RemoveMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static DeoptManager* Get();
+
+ private:
+ bool MethodHasBreakpointsLocked(art::ArtMethod* method)
+ REQUIRES(deoptimization_status_lock_);
+
+ // Wait until nothing is currently in the middle of deoptimizing/undeoptimizing something. This is
+ // needed to ensure that everything is synchronized since threads need to drop the
+ // deoptimization_status_lock_ while deoptimizing methods.
+ void WaitForDeoptimizationToFinish(art::Thread* self)
+ RELEASE(deoptimization_status_lock_) REQUIRES(!art::Locks::mutator_lock_);
+
+ void WaitForDeoptimizationToFinishLocked(art::Thread* self)
+ REQUIRES(deoptimization_status_lock_, !art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalDeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalUndeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
+ // static constexpr const char* kDeoptManagerThreadName = "JVMTI_DeoptManagerWorkerThread";
+
+ art::Mutex deoptimization_status_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
+ bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of times we have gotten requests to deopt everything.
+ uint32_t global_deopt_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of users of deoptimization there currently are.
+ uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // A map from methods to the number of breakpoints in them from all envs.
+ std::unordered_map<art::ArtMethod*, uint32_t> breakpoint_status_
+ GUARDED_BY(deoptimization_status_lock_);
+
+ // The MethodInspectionCallback we use to tell the runtime if we care about particular methods.
+ JvmtiMethodInspectionCallback inspection_callback_;
+
+ // Helper for setting up/tearing-down for deoptimization.
+ friend class ScopedDeoptimizationContext;
+};
+
+} // namespace openjdkjvmti
+#endif // ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index ab8e6def2d..7f77f90862 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -21,9 +21,10 @@
#include <type_traits>
#include <tuple>
+#include "base/mutex-inl.h"
#include "events.h"
#include "jni_internal.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "ti_breakpoint.h"
@@ -276,6 +277,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kBreakpoint>(
jthread jni_thread ATTRIBUTE_UNUSED,
jmethodID jmethod,
jlocation location) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
art::ArtMethod* method = art::jni::DecodeArtMethod(jmethod);
return ShouldDispatchOnThread<ArtJvmtiEvent::kBreakpoint>(env, thread) &&
env->breakpoints.find({method, location}) != env->breakpoints.end();
@@ -292,6 +294,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFramePop>(
const art::ShadowFrame* frame) const {
// Search for the frame. Do this before checking if we need to send the event so that we don't
// have to deal with use-after-free or the frames being reallocated later.
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return env->notify_frames.erase(frame) != 0 &&
ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
}
@@ -313,6 +316,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldModification>(
jfieldID field,
char type_char ATTRIBUTE_UNUSED,
jvalue val ATTRIBUTE_UNUSED) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return ShouldDispatchOnThread<ArtJvmtiEvent::kFieldModification>(env, thread) &&
env->modify_watched_fields.find(
art::jni::DecodeArtField(field)) != env->modify_watched_fields.end();
@@ -329,6 +333,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldAccess>(
jclass field_klass ATTRIBUTE_UNUSED,
jobject object ATTRIBUTE_UNUSED,
jfieldID field) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return ShouldDispatchOnThread<ArtJvmtiEvent::kFieldAccess>(env, thread) &&
env->access_watched_fields.find(
art::jni::DecodeArtField(field)) != env->access_watched_fields.end();
@@ -475,6 +480,7 @@ inline bool EventHandler::NeedsEventUpdate(ArtJvmTiEnv* env,
ArtJvmtiEvent event = added ? ArtJvmtiEvent::kClassFileLoadHookNonRetransformable
: ArtJvmtiEvent::kClassFileLoadHookRetransformable;
return (added && caps.can_access_local_variables == 1) ||
+ caps.can_generate_breakpoint_events == 1 ||
(caps.can_retransform_classes == 1 &&
IsEventEnabledAnywhere(event) &&
env->event_masks.IsEnabledAnywhere(event));
@@ -492,6 +498,9 @@ inline void EventHandler::HandleChangedCapabilities(ArtJvmTiEnv* env,
if (added && caps.can_access_local_variables == 1) {
HandleLocalAccessCapabilityAdded();
}
+ if (caps.can_generate_breakpoint_events == 1) {
+ HandleBreakpointEventsChanged(added);
+ }
}
}
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 0282fbce1f..6a64441a4a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -37,6 +37,7 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/logging.h"
+#include "deopt_manager.h"
#include "dex_file_types.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
@@ -49,7 +50,7 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "monitor.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
@@ -810,9 +811,49 @@ static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
}
}
+static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
+ switch (event) {
+ case ArtJvmtiEvent::kBreakpoint:
+ case ArtJvmtiEvent::kException:
+ return false;
+ // TODO We should support more of these or at least do something to make them discriminate by
+ // thread.
+ case ArtJvmtiEvent::kMethodEntry:
+ case ArtJvmtiEvent::kExceptionCatch:
+ case ArtJvmtiEvent::kMethodExit:
+ case ArtJvmtiEvent::kFieldModification:
+ case ArtJvmtiEvent::kFieldAccess:
+ case ArtJvmtiEvent::kSingleStep:
+ case ArtJvmtiEvent::kFramePop:
+ return true;
+ default:
+ LOG(FATAL) << "Unexpected event type!";
+ UNREACHABLE();
+ }
+}
+
static void SetupTraceListener(JvmtiMethodTraceListener* listener,
ArtJvmtiEvent event,
bool enable) {
+ bool needs_full_deopt = EventNeedsFullDeopt(event);
+ // Make sure we can deopt.
+ {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ DeoptManager* deopt_manager = DeoptManager::Get();
+ if (enable) {
+ deopt_manager->AddDeoptimizationRequester();
+ if (needs_full_deopt) {
+ deopt_manager->AddDeoptimizeAllMethods();
+ }
+ } else {
+ if (needs_full_deopt) {
+ deopt_manager->RemoveDeoptimizeAllMethods();
+ }
+ deopt_manager->RemoveDeoptimizationRequester();
+ }
+ }
+
+ // Add the actual listeners.
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
uint32_t new_events = GetInstrumentationEventsFor(event);
art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
@@ -821,11 +862,6 @@ static void SetupTraceListener(JvmtiMethodTraceListener* listener,
art::gc::kCollectorTypeInstrumentation);
art::ScopedSuspendAll ssa("jvmti method tracing installation");
if (enable) {
- // TODO Depending on the features being used we should be able to avoid deoptimizing everything
- // like we do here.
- if (!instr->AreAllMethodsDeoptimized()) {
- instr->EnableMethodTracing("jvmti-tracing", /*needs_interpreter*/true);
- }
instr->AddListener(listener, new_events);
} else {
instr->RemoveListener(listener, new_events);
@@ -910,6 +946,7 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
}
// FramePop can never be disabled once it's been turned on since we would either need to deal
// with dangling pointers or have missed events.
+ // TODO We really need to make this not the case anymore.
case ArtJvmtiEvent::kFramePop:
if (!enable || (enable && frame_pop_enabled)) {
break;
@@ -1046,6 +1083,14 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
return ERR(NONE);
}
+void EventHandler::HandleBreakpointEventsChanged(bool added) {
+ if (added) {
+ DeoptManager::Get()->AddDeoptimizationRequester();
+ } else {
+ DeoptManager::Get()->RemoveDeoptimizationRequester();
+ }
+}
+
void EventHandler::Shutdown() {
// Need to remove the method_trace_listener_ if it's there.
art::Thread* self = art::Thread::Current();
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index a062e1589e..aed24e59f3 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -232,6 +232,7 @@ class EventHandler {
void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
+ void HandleBreakpointEventsChanged(bool enable);
bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
diff --git a/openjdkjvmti/jvmti_weak_table-inl.h b/openjdkjvmti/jvmti_weak_table-inl.h
index 1c82255fff..5d20946070 100644
--- a/openjdkjvmti/jvmti_weak_table-inl.h
+++ b/openjdkjvmti/jvmti_weak_table-inl.h
@@ -44,7 +44,7 @@
#include "jvmti_allocator.h"
#include "mirror/class.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_breakpoint.cc b/openjdkjvmti/ti_breakpoint.cc
index f5116a8080..8e5b56e9bf 100644
--- a/openjdkjvmti/ti_breakpoint.cc
+++ b/openjdkjvmti/ti_breakpoint.cc
@@ -36,13 +36,15 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/mutex-inl.h"
+#include "deopt_manager.h"
#include "dex_file_annotations.h"
#include "events-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "modifiers.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -63,16 +65,29 @@ Breakpoint::Breakpoint(art::ArtMethod* m, jlocation loc) : method_(m), location_
void BreakpointUtil::RemoveBreakpointsInClass(ArtJvmTiEnv* env, art::mirror::Class* klass) {
std::vector<Breakpoint> to_remove;
- for (const Breakpoint& b : env->breakpoints) {
- if (b.GetMethod()->GetDeclaringClass() == klass) {
- to_remove.push_back(b);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ for (const Breakpoint& b : env->breakpoints) {
+ if (b.GetMethod()->GetDeclaringClass() == klass) {
+ to_remove.push_back(b);
+ }
+ }
+ for (const Breakpoint& b : to_remove) {
+ auto it = env->breakpoints.find(b);
+ DCHECK(it != env->breakpoints.end());
+ env->breakpoints.erase(it);
}
}
- for (const Breakpoint& b : to_remove) {
- auto it = env->breakpoints.find(b);
- DCHECK(it != env->breakpoints.end());
- env->breakpoints.erase(it);
+ if (!to_remove.empty()) {
+ LOG(WARNING) << "Methods with breakpoints potentially not being un-deoptimized.";
}
+ // TODO Figure out how to do this.
+ // DeoptManager* deopt = DeoptManager::Get();
+ // for (const Breakpoint& b : to_remove) {
+ // // TODO It might be good to send these all at once instead.
+ // // deopt->RemoveMethodBreakpointSuspended(b.GetMethod());
+ // LOG(WARNING) << "not un-deopting methods! :-0";
+ // }
}
jvmtiError BreakpointUtil::SetBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -80,19 +95,23 @@ jvmtiError BreakpointUtil::SetBreakpoint(jvmtiEnv* jenv, jmethodID method, jloca
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
if (location < 0 || static_cast<uint32_t>(location) >=
art_method->GetCodeItem()->insns_size_in_code_units_) {
return ERR(INVALID_LOCATION);
}
- auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
- if (!res_pair.second) {
- // Didn't get inserted because it's already present!
- return ERR(DUPLICATE);
+ DeoptManager::Get()->AddMethodBreakpoint(art_method);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
+ if (LIKELY(res_pair.second)) {
+ return OK;
+ }
}
- return OK;
+ // Didn't get inserted because it's already present!
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
+ return ERR(DUPLICATE);
}
jvmtiError BreakpointUtil::ClearBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -100,14 +119,17 @@ jvmtiError BreakpointUtil::ClearBreakpoint(jvmtiEnv* jenv, jmethodID method, jlo
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
- auto pos = env->breakpoints.find(
- /* Breakpoint */ {art::jni::DecodeArtMethod(method)->GetCanonicalMethod(), location});
- if (pos == env->breakpoints.end()) {
- return ERR(NOT_FOUND);
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto pos = env->breakpoints.find(/* Breakpoint */ {art_method, location});
+ if (pos == env->breakpoints.end()) {
+ return ERR(NOT_FOUND);
+ }
+ env->breakpoints.erase(pos);
}
- env->breakpoints.erase(pos);
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
return OK;
}
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 5f29416134..e69c78bab1 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -60,7 +60,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "primitive.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_class_loader.cc b/openjdkjvmti/ti_class_loader.cc
index e81e4bc803..b551b55e18 100644
--- a/openjdkjvmti/ti_class_loader.cc
+++ b/openjdkjvmti/ti_class_loader.cc
@@ -51,7 +51,7 @@
#include "mirror/class.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "object_lock.h"
#include "runtime.h"
#include "transform.h"
diff --git a/openjdkjvmti/ti_field.cc b/openjdkjvmti/ti_field.cc
index c45b926695..b8691837eb 100644
--- a/openjdkjvmti/ti_field.cc
+++ b/openjdkjvmti/ti_field.cc
@@ -189,6 +189,7 @@ jvmtiError FieldUtil::IsFieldSynthetic(jvmtiEnv* env ATTRIBUTE_UNUSED,
jvmtiError FieldUtil::SetFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -205,6 +206,7 @@ jvmtiError FieldUtil::SetFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jf
jvmtiError FieldUtil::ClearFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -221,6 +223,7 @@ jvmtiError FieldUtil::ClearFieldModificationWatch(jvmtiEnv* jenv, jclass klass,
jvmtiError FieldUtil::SetFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -237,6 +240,7 @@ jvmtiError FieldUtil::SetFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID
jvmtiError FieldUtil::ClearFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
diff --git a/openjdkjvmti/ti_field.h b/openjdkjvmti/ti_field.h
index 8a229ed19d..3cf29f099a 100644
--- a/openjdkjvmti/ti_field.h
+++ b/openjdkjvmti/ti_field.h
@@ -35,6 +35,8 @@
#include "jni.h"
#include "jvmti.h"
+#include "art_jvmti.h"
+
namespace openjdkjvmti {
class FieldUtil {
@@ -61,10 +63,14 @@ class FieldUtil {
jfieldID field,
jboolean* is_synthetic_ptr);
- static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
+ static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
};
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 50402a04a9..cf93bf0fb0 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -47,7 +47,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "modifiers.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
@@ -86,21 +86,6 @@ struct TiMethodCallback : public art::MethodCallback {
TiMethodCallback gMethodCallback;
-// TODO We should make this much more selective in the future so we only return true when we
-// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
-// we can just assume that we care we are loaded at all.
-//
-// Even if we don't keep track of this at the method level we might want to keep track of it at the
-// level of enabled capabilities.
-struct TiMethodInspectionCallback : public art::MethodInspectionCallback {
- bool IsMethodBeingInspected(art::ArtMethod* method ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return true;
- }
-};
-
-TiMethodInspectionCallback gMethodInspectionCallback;
-
void MethodUtil::Register(EventHandler* handler) {
gMethodCallback.event_handler = handler;
art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -108,7 +93,6 @@ void MethodUtil::Register(EventHandler* handler) {
art::ScopedSuspendAll ssa("Add method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->AddMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
void MethodUtil::Unregister() {
@@ -117,7 +101,6 @@ void MethodUtil::Unregister() {
art::ScopedSuspendAll ssa("Remove method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->RemoveMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
jvmtiError MethodUtil::GetBytecodes(jvmtiEnv* env,
@@ -779,14 +762,16 @@ jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalVariableClosure c(self, depth, slot, type, val);
- if (!target->RequestSynchronousCheckpoint(&c)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
return c.GetResult();
@@ -906,14 +891,16 @@ jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
SetLocalVariableClosure c(self, depth, slot, type, val);
- if (!target->RequestSynchronousCheckpoint(&c)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
return c.GetResult();
@@ -963,14 +950,16 @@ jvmtiError MethodUtil::GetLocalInstance(jvmtiEnv* env ATTRIBUTE_UNUSED,
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalInstanceClosure c(self, depth, data);
- if (!target->RequestSynchronousCheckpoint(&c)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
return c.GetResult();
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 5a38f46901..7db0566a2e 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -334,10 +334,11 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
struct GetContendedMonitorClosure : public art::Closure {
@@ -393,7 +394,8 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
jobject* out_;
};
GetContendedMonitorClosure closure(self, monitor);
- if (!target->RequestSynchronousCheckpoint(&closure)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
return ERR(THREAD_NOT_ALIVE);
}
return OK;
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 07cf31c354..23df27fbda 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -34,7 +34,7 @@
#include "art_jvmti.h"
#include "base/macros.h"
#include "events-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/openjdkjvmti/ti_properties.cc b/openjdkjvmti/ti_properties.cc
index c412814d8d..4fb3070e93 100644
--- a/openjdkjvmti/ti_properties.cc
+++ b/openjdkjvmti/ti_properties.cc
@@ -35,8 +35,8 @@
#include <vector>
#include "jni.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "art_jvmti.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 53abfbca00..c4f16f5e2d 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -62,7 +62,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "non_debuggable_classes.h"
#include "object_lock.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bafc8552b1..fe12a25151 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -44,7 +44,7 @@
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/string.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -227,7 +227,8 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
- if (!art::DexFileLoader::Open(segment, segment, true, &error_msg, &dex_files)) {
+ if (!art::DexFileLoader::Open(
+ segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index e0c139954d..b43eaa0286 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -53,7 +53,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "ti_thread.h"
@@ -220,28 +220,33 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (max_frame_count < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (frame_buffer == nullptr || count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
if (max_frame_count == 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
*count_ptr = 0;
return ERR(NONE);
}
@@ -251,23 +256,29 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
GetStackTraceDirectClosure closure(frame_buffer,
static_cast<size_t>(start_depth),
static_cast<size_t>(max_frame_count));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
*count_ptr = static_cast<jint>(closure.index);
if (closure.index < static_cast<size_t>(start_depth)) {
return ERR(ILLEGAL_ARGUMENT);
}
return ERR(NONE);
- }
-
- GetStackTraceVectorClosure closure(0, 0);
- thread->RequestSynchronousCheckpoint(&closure);
+ } else {
+ GetStackTraceVectorClosure closure(0, 0);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
- return TranslateFrameVector(closure.frames,
- start_depth,
- closure.start_result,
- max_frame_count,
- frame_buffer,
- count_ptr);
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
+ }
}
template <typename Data>
@@ -678,26 +689,30 @@ jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetFrameCountClosure closure;
- if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
return ERR(THREAD_NOT_ALIVE);
}
@@ -760,29 +775,36 @@ jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (depth < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (method_ptr == nullptr || location_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetLocationClosure closure(static_cast<size_t>(depth));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
if (closure.method == nullptr) {
return ERR(NO_MORE_FRAMES);
@@ -891,17 +913,21 @@ static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
MonitorInfoClosure<Fn> closure(soa, handle_results);
bool called_method = false;
{
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
if (target != self) {
called_method = true;
- if (!target->RequestSynchronousCheckpoint(&closure)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
return ERR(THREAD_NOT_ALIVE);
}
+ } else {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
}
}
// Cannot call the closure on the current thread if we have thread_list_lock since we need to call
@@ -1024,9 +1050,12 @@ jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth)
method,
visitor.GetDexPc());
}
- // Mark shadow frame as needs_notify_pop_
- shadow_frame->SetNotifyPop(true);
- tienv->notify_frames.insert(shadow_frame);
+ {
+ art::WriterMutexLock lk(self, tienv->event_info_mutex_);
+ // Mark shadow frame as needs_notify_pop_
+ shadow_frame->SetNotifyPop(true);
+ tienv->notify_frames.insert(shadow_frame);
+ }
// Make sure can we will go to the interpreter and use the shadow frames.
if (needs_instrument) {
art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index d0913cf859..6d075a6b7b 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -38,12 +38,15 @@
#include "base/mutex.h"
#include "events-inl.h"
#include "gc/system_weak.h"
+#include "gc/collector_type.h"
+#include "gc/gc_cause.h"
+#include "gc/scoped_gc_critical_section.h"
#include "gc_root-inl.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -706,7 +709,7 @@ static void* AgentCallback(void* arg) {
// We already have a peer. So call our special Attach function.
art::Thread* self = art::Thread::Attach("JVMTI Agent thread", true, data->thread);
- CHECK(self != nullptr);
+ CHECK(self != nullptr) << "threads_being_born_ should have ensured thread could be attached.";
// The name in Attach() is only for logging. Set the thread name. This is important so
// that the thread is no longer seen as starting up.
{
@@ -719,6 +722,13 @@ static void* AgentCallback(void* arg) {
env->DeleteGlobalRef(data->thread);
data->thread = nullptr;
+ {
+ // The StartThreadBirth was called in the parent thread. We let the runtime know we are up
+ // before going into the provided code.
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ art::Runtime::Current()->EndThreadBirth();
+ }
+
// Run the agent code.
data->proc(data->jvmti_env, env, const_cast<void*>(data->arg));
@@ -748,6 +758,21 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
return ERR(NULL_POINTER);
}
+ {
+ art::Runtime* runtime = art::Runtime::Current();
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ if (runtime->IsShuttingDownLocked()) {
+ // The runtime is shutting down so we cannot create new threads.
+ // TODO It's not fully clear from the spec what we should do here. We aren't yet in
+ // JVMTI_PHASE_DEAD so we cannot return ERR(WRONG_PHASE) but creating new threads is now
+ // impossible. Existing agents don't seem to generally do anything with this return value so
+ // it doesn't matter too much. We could do something like sending a fake ThreadStart event
+ // even though code is never actually run.
+ return ERR(INTERNAL);
+ }
+ runtime->StartThreadBirth();
+ }
+
std::unique_ptr<AgentData> data(new AgentData);
data->arg = arg;
data->proc = proc;
@@ -759,10 +784,14 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread,
- nullptr,
- &AgentCallback,
- reinterpret_cast<void*>(data.get()));
+ nullptr,
+ &AgentCallback,
+ reinterpret_cast<void*>(data.get()));
if (pthread_create_result != 0) {
+ // If the create succeeded the other thread will call EndThreadBirth.
+ art::Runtime* runtime = art::Runtime::Current();
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ runtime->EndThreadBirth();
return ERR(INTERNAL);
}
data.release();
@@ -1008,12 +1037,14 @@ jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(INVALID_OBJECT);
}
art::Handle<art::mirror::Throwable> exc(hs.NewHandle(obj->AsThrowable()));
- art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
} else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return ERR(THREAD_NOT_ALIVE);
}
struct StopThreadClosure : public art::Closure {
@@ -1032,7 +1063,8 @@ jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> exception_;
};
StopThreadClosure c(exc);
- if (target->RequestSynchronousCheckpoint(&c)) {
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (RequestGCSafeSynchronousCheckpoint(target, &c)) {
return OK;
} else {
// Something went wrong, probably the thread died.
@@ -1055,4 +1087,29 @@ jvmtiError ThreadUtil::InterruptThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread t
return OK;
}
+class GcCriticalSectionClosure : public art::Closure {
+ public:
+ explicit GcCriticalSectionClosure(art::Closure* wrapped) : wrapped_(wrapped) {}
+
+ void Run(art::Thread* self) OVERRIDE {
+ if (art::kIsDebugBuild) {
+ art::Locks::thread_list_lock_->AssertNotHeld(art::Thread::Current());
+ }
+ // This might block as it waits for any in-progress GCs to finish but this is fine since we
+ // released the Thread-list-lock prior to calling this in RequestSynchronousCheckpoint.
+ art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
+ art::gc::kGcCauseDebugger,
+ art::gc::kCollectorTypeDebugger);
+ wrapped_->Run(self);
+ }
+
+ private:
+ art::Closure* wrapped_;
+};
+
+bool ThreadUtil::RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function) {
+ GcCriticalSectionClosure gccsc(function);
+ return thr->RequestSynchronousCheckpoint(&gccsc);
+}
+
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index 09b4cabcfc..341bffe51e 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -42,6 +42,7 @@ namespace art {
class ArtField;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
+class Closure;
} // namespace art
namespace openjdkjvmti {
@@ -133,6 +134,16 @@ class ThreadUtil {
REQUIRES(!art::Locks::user_code_suspension_lock_,
!art::Locks::thread_suspend_count_lock_);
+ // This will request a synchronous checkpoint in such a way as to prevent gc races if a local
+ // variable is taken from one thread's stack and placed in the stack of another thread.
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
+ // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
+ // execute the checkpoint for us if it is Runnable.
+ static bool RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ RELEASE(art::Locks::thread_list_lock_)
+ REQUIRES(!art::Locks::thread_suspend_count_lock_);
+
private:
// We need to make sure only one thread tries to suspend threads at a time so we can get the
// 'suspend-only-once' behavior the spec requires. Internally, ART considers suspension to be a
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index edce5b4aa9..ae82d72c0f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -129,7 +129,7 @@ bool PatchOat::Patch(const std::string& image_location,
TimingLogger::ScopedTiming t("Runtime Setup", timings);
- CHECK_NE(isa, kNone);
+ CHECK_NE(isa, InstructionSet::kNone);
const char* isa_name = GetInstructionSetString(isa);
// Set up the runtime
@@ -807,7 +807,7 @@ static int patchoat(int argc, char **argv) {
// cmd line args
bool isa_set = false;
- InstructionSet isa = kNone;
+ InstructionSet isa = InstructionSet::kNone;
std::string input_image_location;
std::string output_image_filename;
off_t base_delta = 0;
@@ -824,7 +824,7 @@ static int patchoat(int argc, char **argv) {
isa_set = true;
const char* isa_str = option.substr(strlen("--instruction-set=")).data();
isa = GetInstructionSetFromString(isa_str);
- if (isa == kNone) {
+ if (isa == InstructionSet::kNone) {
Usage("Unknown or invalid instruction set %s", isa_str);
}
} else if (option.starts_with("--input-image-location=")) {
diff --git a/profman/profman.cc b/profman/profman.cc
index 8ccf7b4c1d..4c4bb87e49 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -331,6 +331,7 @@ class ProfMan FINAL {
if (use_apk_fd_list) {
if (DexFileLoader::OpenZip(apks_fd_[i],
dex_locations_[i],
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
@@ -341,6 +342,7 @@ class ProfMan FINAL {
} else {
if (DexFileLoader::Open(apk_files_[i].c_str(),
dex_locations_[i],
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 904da4bbd6..e032238324 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -35,6 +35,7 @@ cc_defaults {
"base/arena_bit_vector.cc",
"base/bit_vector.cc",
"base/file_magic.cc",
+ "base/file_utils.cc",
"base/hex_dump.cc",
"base/logging.cc",
"base/mutex.cc",
@@ -152,6 +153,7 @@ cc_defaults {
"mirror/stack_trace_element.cc",
"mirror/string.cc",
"mirror/throwable.cc",
+ "mirror/var_handle.cc",
"monitor.cc",
"native_bridge_art_interface.cc",
"native_stack_dump.cc",
@@ -203,6 +205,7 @@ cc_defaults {
"runtime.cc",
"runtime_callbacks.cc",
"runtime_common.cc",
+ "runtime_intrinsics.cc",
"runtime_options.cc",
"scoped_thread_state_change.cc",
"signal_catcher.cc",
@@ -402,13 +405,16 @@ cc_defaults {
export_generated_headers: ["cpp-define-generator-asm-support"],
include_dirs: [
"art/sigchainlib",
- "art",
+ "external/icu/icu4c/source/common",
+ "external/lz4/lib",
+ "external/zlib",
],
header_libs: [
"art_cmdlineparser_headers",
+ "libnativehelper_header_only",
+ "jni_platform_headers",
],
shared_libs: [
- "libnativehelper",
"libnativebridge",
"libnativeloader",
"libbacktrace",
@@ -510,7 +516,12 @@ art_cc_library {
"libartd",
"libbase",
"libbacktrace",
- "libnativehelper",
+ ],
+ header_libs: [
+ "libnativehelper_header_only",
+ ],
+ include_dirs: [
+ "external/icu/icu4c/source/common",
],
}
@@ -596,6 +607,7 @@ art_cc_test {
"mirror/dex_cache_test.cc",
"mirror/method_type_test.cc",
"mirror/object_test.cc",
+ "mirror/var_handle_test.cc",
"monitor_pool_test.cc",
"monitor_test.cc",
"oat_file_test.cc",
@@ -622,6 +634,9 @@ art_cc_test {
header_libs: [
"art_cmdlineparser_headers", // For parsed_options_test.
],
+ include_dirs: [
+ "external/zlib",
+ ],
}
art_cc_test {
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 5c313783be..ef2b34236f 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -59,7 +59,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
// get the method from the top of the stack. However it's in r0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
} else {
@@ -209,7 +209,7 @@ bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTR
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kArm);
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 249696883a..b789fc7481 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -266,7 +266,7 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() {
}
bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kArm != other->GetInstructionSet()) {
+ if (InstructionSet::kArm != other->GetInstructionSet()) {
return false;
}
const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
@@ -276,7 +276,7 @@ bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) cons
}
bool ArmInstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
- if (kArm != other->GetInstructionSet()) {
+ if (InstructionSet::kArm != other->GetInstructionSet()) {
return false;
}
const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f438a768a5..f82534b511 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -52,7 +52,7 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
InstructionSet GetInstructionSet() const OVERRIDE {
- return kArm;
+ return InstructionSet::kArm;
}
uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 35823518ce..d9651f9088 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -24,10 +24,10 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
// Build features for a 32-bit ARM krait processor.
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> krait_features(
- InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "krait", &error_msg));
ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ ASSERT_EQ(krait_features->GetInstructionSet(), InstructionSet::kArm);
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -36,10 +36,10 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
// Build features for a 32-bit ARM kryo processor.
std::unique_ptr<const InstructionSetFeatures> kryo_features(
- InstructionSetFeatures::FromVariant(kArm, "kryo", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "kryo", &error_msg));
ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
- ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ ASSERT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm);
EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -48,7 +48,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
// Build features for a 32-bit ARM denver processor.
std::unique_ptr<const InstructionSetFeatures> denver_features(
- InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "denver", &error_msg));
ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
EXPECT_TRUE(denver_features->Equals(denver_features.get()));
@@ -62,7 +62,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
// Build features for a 32-bit ARMv7 processor.
std::unique_ptr<const InstructionSetFeatures> generic_features(
- InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
EXPECT_TRUE(generic_features->Equals(generic_features.get()));
@@ -75,7 +75,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
// ARM6 is not a supported architecture variant.
std::unique_ptr<const InstructionSetFeatures> arm6_features(
- InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "arm6", &error_msg));
EXPECT_TRUE(arm6_features.get() == nullptr);
EXPECT_NE(error_msg.size(), 0U);
}
@@ -83,7 +83,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> base_features(
- InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
// Build features for a 32-bit ARM with LPAE and div processor.
@@ -91,7 +91,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ ASSERT_EQ(krait_features->GetInstructionSet(), InstructionSet::kArm);
EXPECT_TRUE(krait_features->Equals(krait_features.get()));
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -103,7 +103,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
- ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ ASSERT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm);
EXPECT_TRUE(kryo_features->Equals(krait_features.get()));
EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index b9f9d551a9..d535c7e3c6 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -52,7 +52,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
// get the method from the top of the stack. However it's in x0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm64));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
} else {
@@ -164,7 +164,7 @@ bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTR
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm64);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kArm64);
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index e5f6f11326..d830ccffbb 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -98,7 +98,7 @@ Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
}
bool Arm64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kArm64 != other->GetInstructionSet()) {
+ if (InstructionSet::kArm64 != other->GetInstructionSet()) {
return false;
}
const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 4243d32968..af2d4c79f9 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -50,7 +50,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
InstructionSet GetInstructionSet() const OVERRIDE {
- return kArm64;
+ return InstructionSet::kArm64;
}
uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 91cb58fedf..7fd39b6b1b 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -24,41 +24,41 @@ TEST(Arm64InstructionSetFeaturesTest, Arm64Features) {
// Build features for an ARM64 processor.
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> arm64_features(
- InstructionSetFeatures::FromVariant(kArm64, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "default", &error_msg));
ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(arm64_features->GetInstructionSet(), kArm64);
+ EXPECT_EQ(arm64_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 1U);
std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
- InstructionSetFeatures::FromVariant(kArm64, "cortex-a57", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a57", &error_msg));
ASSERT_TRUE(cortex_a57_features.get() != nullptr) << error_msg;
- EXPECT_EQ(cortex_a57_features->GetInstructionSet(), kArm64);
+ EXPECT_EQ(cortex_a57_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
EXPECT_STREQ("a53", cortex_a57_features->GetFeatureString().c_str());
EXPECT_EQ(cortex_a57_features->AsBitmap(), 1U);
std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
- InstructionSetFeatures::FromVariant(kArm64, "cortex-a73", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a73", &error_msg));
ASSERT_TRUE(cortex_a73_features.get() != nullptr) << error_msg;
- EXPECT_EQ(cortex_a73_features->GetInstructionSet(), kArm64);
+ EXPECT_EQ(cortex_a73_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a73_features->Equals(cortex_a73_features.get()));
EXPECT_STREQ("a53", cortex_a73_features->GetFeatureString().c_str());
EXPECT_EQ(cortex_a73_features->AsBitmap(), 1U);
std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
- InstructionSetFeatures::FromVariant(kArm64, "cortex-a35", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a35", &error_msg));
ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
- EXPECT_EQ(cortex_a35_features->GetInstructionSet(), kArm64);
+ EXPECT_EQ(cortex_a35_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
EXPECT_STREQ("-a53", cortex_a35_features->GetFeatureString().c_str());
EXPECT_EQ(cortex_a35_features->AsBitmap(), 0U);
std::unique_ptr<const InstructionSetFeatures> kryo_features(
- InstructionSetFeatures::FromVariant(kArm64, "kryo", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "kryo", &error_msg));
ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
- EXPECT_EQ(kryo_features->GetInstructionSet(), kArm64);
+ EXPECT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index adfc88fd35..280e5937c6 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2921,7 +2921,7 @@ ENTRY art_quick_invoke_polymorphic
INCREASE_FRAME 16 // Reserve space for JValue result.
str xzr, [sp, #0] // Initialize result to zero.
mov x0, sp // Set r0 to point to result.
- bl artInvokePolymorphic // ArtInvokePolymorphic(result, receiver, thread, save_area)
+ bl artInvokePolymorphic // artInvokePolymorphic(result, receiver, thread, save_area)
uxtb w0, w0 // Result is the return type descriptor as a char.
sub w0, w0, 'A' // Convert to zero based index.
cmp w0, 'Z' - 'A'
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
index 64af7eccd4..ecccdcf7eb 100644
--- a/runtime/arch/instruction_set.cc
+++ b/runtime/arch/instruction_set.cc
@@ -26,14 +26,14 @@ namespace art {
void InstructionSetAbort(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kThumb2:
- case kArm64:
- case kX86:
- case kX86_64:
- case kMips:
- case kMips64:
- case kNone:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
+ case InstructionSet::kNone:
LOG(FATAL) << "Unsupported instruction set " << isa;
UNREACHABLE();
}
@@ -43,20 +43,20 @@ void InstructionSetAbort(InstructionSet isa) {
const char* GetInstructionSetString(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return "arm";
- case kArm64:
+ case InstructionSet::kArm64:
return "arm64";
- case kX86:
+ case InstructionSet::kX86:
return "x86";
- case kX86_64:
+ case InstructionSet::kX86_64:
return "x86_64";
- case kMips:
+ case InstructionSet::kMips:
return "mips";
- case kMips64:
+ case InstructionSet::kMips64:
return "mips64";
- case kNone:
+ case InstructionSet::kNone:
return "none";
}
LOG(FATAL) << "Unknown ISA " << isa;
@@ -67,62 +67,62 @@ InstructionSet GetInstructionSetFromString(const char* isa_str) {
CHECK(isa_str != nullptr);
if (strcmp("arm", isa_str) == 0) {
- return kArm;
+ return InstructionSet::kArm;
} else if (strcmp("arm64", isa_str) == 0) {
- return kArm64;
+ return InstructionSet::kArm64;
} else if (strcmp("x86", isa_str) == 0) {
- return kX86;
+ return InstructionSet::kX86;
} else if (strcmp("x86_64", isa_str) == 0) {
- return kX86_64;
+ return InstructionSet::kX86_64;
} else if (strcmp("mips", isa_str) == 0) {
- return kMips;
+ return InstructionSet::kMips;
} else if (strcmp("mips64", isa_str) == 0) {
- return kMips64;
+ return InstructionSet::kMips64;
}
- return kNone;
+ return InstructionSet::kNone;
}
InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
switch (e_machine) {
case EM_ARM:
- return kArm;
+ return InstructionSet::kArm;
case EM_AARCH64:
- return kArm64;
+ return InstructionSet::kArm64;
case EM_386:
- return kX86;
+ return InstructionSet::kX86;
case EM_X86_64:
- return kX86_64;
+ return InstructionSet::kX86_64;
case EM_MIPS: {
if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
(e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
- return kMips;
+ return InstructionSet::kMips;
} else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
- return kMips64;
+ return InstructionSet::kMips64;
}
break;
}
}
- return kNone;
+ return InstructionSet::kNone;
}
size_t GetInstructionSetAlignment(InstructionSet isa) {
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall-through.
- case kThumb2:
+ case InstructionSet::kThumb2:
return kArmAlignment;
- case kArm64:
+ case InstructionSet::kArm64:
return kArm64Alignment;
- case kX86:
+ case InstructionSet::kX86:
// Fall-through.
- case kX86_64:
+ case InstructionSet::kX86_64:
return kX86Alignment;
- case kMips:
+ case InstructionSet::kMips:
// Fall-through.
- case kMips64:
+ case InstructionSet::kMips64:
return kMipsAlignment;
- case kNone:
+ case InstructionSet::kNone:
LOG(FATAL) << "ISA kNone does not have alignment.";
UNREACHABLE();
}
@@ -171,26 +171,26 @@ static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
size_t GetStackOverflowReservedBytes(InstructionSet isa) {
switch (isa) {
- case kArm: // Intentional fall-through.
- case kThumb2:
+ case InstructionSet::kArm: // Intentional fall-through.
+ case InstructionSet::kThumb2:
return kArmStackOverflowReservedBytes;
- case kArm64:
+ case InstructionSet::kArm64:
return kArm64StackOverflowReservedBytes;
- case kMips:
+ case InstructionSet::kMips:
return kMipsStackOverflowReservedBytes;
- case kMips64:
+ case InstructionSet::kMips64:
return kMips64StackOverflowReservedBytes;
- case kX86:
+ case InstructionSet::kX86:
return kX86StackOverflowReservedBytes;
- case kX86_64:
+ case InstructionSet::kX86_64:
return kX86_64StackOverflowReservedBytes;
- case kNone:
+ case InstructionSet::kNone:
LOG(FATAL) << "kNone has no stack overflow size";
UNREACHABLE();
}
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 7203b188c1..6434005dda 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -25,7 +25,7 @@
namespace art {
-enum InstructionSet {
+enum class InstructionSet {
kNone,
kArm,
kArm64,
@@ -33,24 +33,25 @@ enum InstructionSet {
kX86,
kX86_64,
kMips,
- kMips64
+ kMips64,
+ kLast = kMips64
};
std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
#if defined(__arm__)
-static constexpr InstructionSet kRuntimeISA = kArm;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
#elif defined(__aarch64__)
-static constexpr InstructionSet kRuntimeISA = kArm64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm64;
#elif defined(__mips__) && !defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = kMips;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips;
#elif defined(__mips__) && defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = kMips64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64;
#elif defined(__i386__)
-static constexpr InstructionSet kRuntimeISA = kX86;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86;
#elif defined(__x86_64__)
-static constexpr InstructionSet kRuntimeISA = kX86_64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86_64;
#else
-static constexpr InstructionSet kRuntimeISA = kNone;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kNone;
#endif
// Architecture-specific pointer sizes
@@ -95,22 +96,22 @@ NO_RETURN void InstructionSetAbort(InstructionSet isa);
constexpr PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall-through.
- case kThumb2:
+ case InstructionSet::kThumb2:
return kArmPointerSize;
- case kArm64:
+ case InstructionSet::kArm64:
return kArm64PointerSize;
- case kX86:
+ case InstructionSet::kX86:
return kX86PointerSize;
- case kX86_64:
+ case InstructionSet::kX86_64:
return kX86_64PointerSize;
- case kMips:
+ case InstructionSet::kMips:
return kMipsPointerSize;
- case kMips64:
+ case InstructionSet::kMips64:
return kMips64PointerSize;
- case kNone:
+ case InstructionSet::kNone:
break;
}
InstructionSetAbort(isa);
@@ -118,22 +119,22 @@ constexpr PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) {
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall-through.
- case kThumb2:
+ case InstructionSet::kThumb2:
return kThumb2InstructionAlignment;
- case kArm64:
+ case InstructionSet::kArm64:
return kArm64InstructionAlignment;
- case kX86:
+ case InstructionSet::kX86:
return kX86InstructionAlignment;
- case kX86_64:
+ case InstructionSet::kX86_64:
return kX86_64InstructionAlignment;
- case kMips:
+ case InstructionSet::kMips:
return kMipsInstructionAlignment;
- case kMips64:
+ case InstructionSet::kMips64:
return kMips64InstructionAlignment;
- case kNone:
+ case InstructionSet::kNone:
break;
}
InstructionSetAbort(isa);
@@ -141,16 +142,16 @@ constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) {
constexpr bool IsValidInstructionSet(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kThumb2:
- case kArm64:
- case kX86:
- case kX86_64:
- case kMips:
- case kMips64:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
return true;
- case kNone:
+ case InstructionSet::kNone:
return false;
}
return false;
@@ -160,18 +161,18 @@ size_t GetInstructionSetAlignment(InstructionSet isa);
constexpr bool Is64BitInstructionSet(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kThumb2:
- case kX86:
- case kMips:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kX86:
+ case InstructionSet::kMips:
return false;
- case kArm64:
- case kX86_64:
- case kMips64:
+ case InstructionSet::kArm64:
+ case InstructionSet::kX86_64:
+ case InstructionSet::kMips64:
return true;
- case kNone:
+ case InstructionSet::kNone:
break;
}
InstructionSetAbort(isa);
@@ -183,22 +184,22 @@ constexpr PointerSize InstructionSetPointerSize(InstructionSet isa) {
constexpr size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall-through.
- case kThumb2:
+ case InstructionSet::kThumb2:
return 4;
- case kArm64:
+ case InstructionSet::kArm64:
return 8;
- case kX86:
+ case InstructionSet::kX86:
return 4;
- case kX86_64:
+ case InstructionSet::kX86_64:
return 8;
- case kMips:
+ case InstructionSet::kMips:
return 4;
- case kMips64:
+ case InstructionSet::kMips64:
return 8;
- case kNone:
+ case InstructionSet::kNone:
break;
}
InstructionSetAbort(isa);
@@ -206,22 +207,22 @@ constexpr size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
constexpr size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
switch (isa) {
- case kArm:
+ case InstructionSet::kArm:
// Fall-through.
- case kThumb2:
+ case InstructionSet::kThumb2:
return 4;
- case kArm64:
+ case InstructionSet::kArm64:
return 8;
- case kX86:
+ case InstructionSet::kX86:
return 8;
- case kX86_64:
+ case InstructionSet::kX86_64:
return 8;
- case kMips:
+ case InstructionSet::kMips:
return 4;
- case kMips64:
+ case InstructionSet::kMips64:
return 8;
- case kNone:
+ case InstructionSet::kNone:
break;
}
InstructionSetAbort(isa);
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index ed8ff607a9..b6b24c24fb 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -33,21 +33,21 @@ namespace art {
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
InstructionSet isa, const std::string& variant, std::string* error_msg) {
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return ArmInstructionSetFeatures::FromVariant(variant, error_msg);
- case kArm64:
+ case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
- case kMips:
+ case InstructionSet::kMips:
return MipsInstructionSetFeatures::FromVariant(variant, error_msg);
- case kMips64:
+ case InstructionSet::kMips64:
return Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
- case kX86:
+ case InstructionSet::kX86:
return X86InstructionSetFeatures::FromVariant(variant, error_msg);
- case kX86_64:
+ case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
- case kNone:
+ case InstructionSet::kNone:
break;
}
UNIMPLEMENTED(FATAL) << isa;
@@ -58,27 +58,27 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap
uint32_t bitmap) {
std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
result = ArmInstructionSetFeatures::FromBitmap(bitmap);
break;
- case kArm64:
+ case InstructionSet::kArm64:
result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
break;
- case kMips:
+ case InstructionSet::kMips:
result = MipsInstructionSetFeatures::FromBitmap(bitmap);
break;
- case kMips64:
+ case InstructionSet::kMips64:
result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
break;
- case kX86:
+ case InstructionSet::kX86:
result = X86InstructionSetFeatures::FromBitmap(bitmap);
break;
- case kX86_64:
+ case InstructionSet::kX86_64:
result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
break;
- case kNone:
+ case InstructionSet::kNone:
default:
UNIMPLEMENTED(FATAL) << isa;
UNREACHABLE();
@@ -89,21 +89,21 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return ArmInstructionSetFeatures::FromCppDefines();
- case kArm64:
+ case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromCppDefines();
- case kMips:
+ case InstructionSet::kMips:
return MipsInstructionSetFeatures::FromCppDefines();
- case kMips64:
+ case InstructionSet::kMips64:
return Mips64InstructionSetFeatures::FromCppDefines();
- case kX86:
+ case InstructionSet::kX86:
return X86InstructionSetFeatures::FromCppDefines();
- case kX86_64:
+ case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromCppDefines();
- case kNone:
+ case InstructionSet::kNone:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -113,21 +113,21 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDef
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return ArmInstructionSetFeatures::FromCpuInfo();
- case kArm64:
+ case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromCpuInfo();
- case kMips:
+ case InstructionSet::kMips:
return MipsInstructionSetFeatures::FromCpuInfo();
- case kMips64:
+ case InstructionSet::kMips64:
return Mips64InstructionSetFeatures::FromCpuInfo();
- case kX86:
+ case InstructionSet::kX86:
return X86InstructionSetFeatures::FromCpuInfo();
- case kX86_64:
+ case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromCpuInfo();
- case kNone:
+ case InstructionSet::kNone:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -136,21 +136,21 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInf
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return ArmInstructionSetFeatures::FromHwcap();
- case kArm64:
+ case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromHwcap();
- case kMips:
+ case InstructionSet::kMips:
return MipsInstructionSetFeatures::FromHwcap();
- case kMips64:
+ case InstructionSet::kMips64:
return Mips64InstructionSetFeatures::FromHwcap();
- case kX86:
+ case InstructionSet::kX86:
return X86InstructionSetFeatures::FromHwcap();
- case kX86_64:
+ case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromHwcap();
- case kNone:
+ case InstructionSet::kNone:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -159,21 +159,21 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap(
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return ArmInstructionSetFeatures::FromAssembly();
- case kArm64:
+ case InstructionSet::kArm64:
return Arm64InstructionSetFeatures::FromAssembly();
- case kMips:
+ case InstructionSet::kMips:
return MipsInstructionSetFeatures::FromAssembly();
- case kMips64:
+ case InstructionSet::kMips64:
return Mips64InstructionSetFeatures::FromAssembly();
- case kX86:
+ case InstructionSet::kX86:
return X86InstructionSetFeatures::FromAssembly();
- case kX86_64:
+ case InstructionSet::kX86_64:
return X86_64InstructionSetFeatures::FromAssembly();
- case kNone:
+ case InstructionSet::kNone:
break;
}
UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -222,32 +222,33 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeature
}
const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
- DCHECK_EQ(kArm, GetInstructionSet());
+ DCHECK_EQ(InstructionSet::kArm, GetInstructionSet());
return down_cast<const ArmInstructionSetFeatures*>(this);
}
const Arm64InstructionSetFeatures* InstructionSetFeatures::AsArm64InstructionSetFeatures() const {
- DCHECK_EQ(kArm64, GetInstructionSet());
+ DCHECK_EQ(InstructionSet::kArm64, GetInstructionSet());
return down_cast<const Arm64InstructionSetFeatures*>(this);
}
const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
- DCHECK_EQ(kMips, GetInstructionSet());
+ DCHECK_EQ(InstructionSet::kMips, GetInstructionSet());
return down_cast<const MipsInstructionSetFeatures*>(this);
}
const Mips64InstructionSetFeatures* InstructionSetFeatures::AsMips64InstructionSetFeatures() const {
- DCHECK_EQ(kMips64, GetInstructionSet());
+ DCHECK_EQ(InstructionSet::kMips64, GetInstructionSet());
return down_cast<const Mips64InstructionSetFeatures*>(this);
}
const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
- DCHECK(kX86 == GetInstructionSet() || kX86_64 == GetInstructionSet());
+ DCHECK(InstructionSet::kX86 == GetInstructionSet() ||
+ InstructionSet::kX86_64 == GetInstructionSet());
return down_cast<const X86InstructionSetFeatures*>(this);
}
const X86_64InstructionSetFeatures* InstructionSetFeatures::AsX86_64InstructionSetFeatures() const {
- DCHECK_EQ(kX86_64, GetInstructionSet());
+ DCHECK_EQ(InstructionSet::kX86_64, GetInstructionSet());
return down_cast<const X86_64InstructionSetFeatures*>(this);
}
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index b251b57c99..12a117d7a1 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -23,34 +23,40 @@
namespace art {
TEST(InstructionSetTest, GetInstructionSetFromString) {
- EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
- EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
- EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
- EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
- EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
- EXPECT_EQ(kMips64, GetInstructionSetFromString("mips64"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
+ EXPECT_EQ(InstructionSet::kArm, GetInstructionSetFromString("arm"));
+ EXPECT_EQ(InstructionSet::kArm64, GetInstructionSetFromString("arm64"));
+ EXPECT_EQ(InstructionSet::kX86, GetInstructionSetFromString("x86"));
+ EXPECT_EQ(InstructionSet::kX86_64, GetInstructionSetFromString("x86_64"));
+ EXPECT_EQ(InstructionSet::kMips, GetInstructionSetFromString("mips"));
+ EXPECT_EQ(InstructionSet::kMips64, GetInstructionSetFromString("mips64"));
+ EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("none"));
+ EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("random-string"));
}
TEST(InstructionSetTest, GetInstructionSetString) {
- EXPECT_STREQ("arm", GetInstructionSetString(kArm));
- EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
- EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
- EXPECT_STREQ("x86", GetInstructionSetString(kX86));
- EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
- EXPECT_STREQ("mips", GetInstructionSetString(kMips));
- EXPECT_STREQ("mips64", GetInstructionSetString(kMips64));
- EXPECT_STREQ("none", GetInstructionSetString(kNone));
+ EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kArm));
+ EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kThumb2));
+ EXPECT_STREQ("arm64", GetInstructionSetString(InstructionSet::kArm64));
+ EXPECT_STREQ("x86", GetInstructionSetString(InstructionSet::kX86));
+ EXPECT_STREQ("x86_64", GetInstructionSetString(InstructionSet::kX86_64));
+ EXPECT_STREQ("mips", GetInstructionSetString(InstructionSet::kMips));
+ EXPECT_STREQ("mips64", GetInstructionSetString(InstructionSet::kMips64));
+ EXPECT_STREQ("none", GetInstructionSetString(InstructionSet::kNone));
}
TEST(InstructionSetTest, GetInstructionSetInstructionAlignment) {
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kThumb2), kThumb2InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kArm64), kArm64InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86), kX86InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86_64), kX86_64InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips), kMipsInstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips64), kMips64InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kThumb2),
+ kThumb2InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kArm64),
+ kArm64InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86),
+ kX86InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86_64),
+ kX86_64InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips),
+ kMipsInstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips64),
+ kMips64InstructionAlignment);
}
TEST(InstructionSetTest, TestRoundTrip) {
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 50095ae77e..fa51059d3a 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -173,4 +173,30 @@
.set pop
.endm
+// This utility macro is used to check whether the address contained in
+// a register is suitably aligned. Default usage is confirm that the
+// address stored in $sp is a multiple of 16. It can be used for other
+// alignments, and for other base address registers, if needed.
+//
+// Enable this macro by running the shell command:
+//
+// export ART_MIPS32_CHECK_ALIGNMENT=true
+//
+// NOTE: The value of alignment must be a power of 2, and must fit in an
+// unsigned 15-bit integer. The macro won't behave as expected if these
+// conditions aren't met.
+//
+.macro CHECK_ALIGNMENT ba=$sp, tmp=$at, alignment=16
+#ifdef ART_MIPS32_CHECK_ALIGNMENT
+ .set push
+ .set noat
+ .set noreorder
+ andi \tmp, \ba, \alignment-1
+ beqz \tmp, .+12 # Skip break instruction if base address register (ba) is aligned
+ nop
+ break
+ .set pop
+#endif
+.endm
+
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 2edd63f58a..bec52384ac 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -19,7 +19,7 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 96
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 112
#define FRAME_SIZE_SAVE_REFS_ONLY 48
#define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
#define FRAME_SIZE_SAVE_EVERYTHING 256
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index ca1de0ae2a..3f362de7ce 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -42,7 +42,16 @@ void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
+ // If the $ZERO register shows up in the list of registers to
+ // be saved this was only done to properly align the floating
+ // point register save locations to addresses which are
+ // multiples of 8. We only store the address of a register in
+ // gprs_ if the register is not the $ZERO register. The $ZERO
+ // register is read-only so there's never a reason to save it
+ // on the stack.
+ if (core_reg != 0u) {
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
+ }
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
@@ -97,7 +106,9 @@ extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
void MipsContext::DoLongJump() {
uintptr_t gprs[kNumberOfCoreRegisters];
- uint32_t fprs[kNumberOfFRegisters];
+ // Align fprs[] so that art_quick_do_long_jump() can load FPU
+ // registers from it using the ldc1 instruction.
+ uint32_t fprs[kNumberOfFRegisters] __attribute__((aligned(8)));
for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : MipsContext::kBadGprBase + i;
}
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index bf3e96a8ff..6dce54e5c5 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -51,7 +51,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
// get the method from the top of the stack. However it's in r0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
} else {
@@ -124,7 +124,7 @@ bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, voi
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips);
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 6540b44518..6d4145bc98 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -168,7 +168,7 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
}
bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kMips != other->GetInstructionSet()) {
+ if (InstructionSet::kMips != other->GetInstructionSet()) {
return false;
}
const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 1cb852e262..ee539edf3a 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -52,7 +52,7 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
InstructionSet GetInstructionSet() const OVERRIDE {
- return kMips;
+ return InstructionSet::kMips;
}
uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
index 54fd2c94c4..b7de952529 100644
--- a/runtime/arch/mips/instruction_set_features_mips_test.cc
+++ b/runtime/arch/mips/instruction_set_features_mips_test.cc
@@ -23,9 +23,9 @@ namespace art {
TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips_features(
- InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips_features->GetInstructionSet(), kMips);
+ EXPECT_EQ(mips_features->GetInstructionSet(), InstructionSet::kMips);
EXPECT_TRUE(mips_features->Equals(mips_features.get()));
EXPECT_STREQ("fpu32,mips2,-msa", mips_features->GetFeatureString().c_str());
EXPECT_EQ(mips_features->AsBitmap(), 3U);
@@ -34,15 +34,15 @@ TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r1_features->GetInstructionSet(), kMips);
+ EXPECT_EQ(mips32r1_features->GetInstructionSet(), InstructionSet::kMips);
EXPECT_TRUE(mips32r1_features->Equals(mips32r1_features.get()));
EXPECT_STREQ("fpu32,-mips2,-msa", mips32r1_features->GetFeatureString().c_str());
EXPECT_EQ(mips32r1_features->AsBitmap(), 1U);
std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r1_features->Equals(mips_default_features.get()));
}
@@ -50,20 +50,20 @@ TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r2_features->GetInstructionSet(), kMips);
+ EXPECT_EQ(mips32r2_features->GetInstructionSet(), InstructionSet::kMips);
EXPECT_TRUE(mips32r2_features->Equals(mips32r2_features.get()));
EXPECT_STREQ("fpu32,mips2,-msa", mips32r2_features->GetFeatureString().c_str());
EXPECT_EQ(mips32r2_features->AsBitmap(), 3U);
std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
EXPECT_TRUE(mips32r2_features->Equals(mips_default_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r2_features->Equals(mips32r1_features.get()));
}
@@ -71,25 +71,25 @@ TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r5_features->GetInstructionSet(), kMips);
+ EXPECT_EQ(mips32r5_features->GetInstructionSet(), InstructionSet::kMips);
EXPECT_TRUE(mips32r5_features->Equals(mips32r5_features.get()));
EXPECT_STREQ("-fpu32,mips2,msa", mips32r5_features->GetFeatureString().c_str());
EXPECT_EQ(mips32r5_features->AsBitmap(), 10U);
std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r5_features->Equals(mips_default_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r5_features->Equals(mips32r1_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r5_features->Equals(mips32r2_features.get()));
}
@@ -97,30 +97,30 @@ TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR6Variant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips32r6_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r6", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r6", &error_msg));
ASSERT_TRUE(mips32r6_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips32r6_features->GetInstructionSet(), kMips);
+ EXPECT_EQ(mips32r6_features->GetInstructionSet(), InstructionSet::kMips);
EXPECT_TRUE(mips32r6_features->Equals(mips32r6_features.get()));
EXPECT_STREQ("-fpu32,mips2,r6,msa", mips32r6_features->GetFeatureString().c_str());
EXPECT_EQ(mips32r6_features->AsBitmap(), 14U);
std::unique_ptr<const InstructionSetFeatures> mips_default_features(
- InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r6_features->Equals(mips_default_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r6_features->Equals(mips32r1_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r6_features->Equals(mips32r2_features.get()));
std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
- InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
EXPECT_FALSE(mips32r6_features->Equals(mips32r5_features.get()));
}
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index 5c950717c4..2c0e75090d 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -28,8 +28,9 @@ ENTRY art_jni_dlsym_lookup_stub
.cfi_adjust_cfa_offset 48
sw $ra, 32($sp)
.cfi_rel_offset 31, 32
- SDu $f14, $f15, 24, $sp, $t0
- SDu $f12, $f13, 16, $sp, $t0
+ CHECK_ALIGNMENT $sp, $t0
+ sdc1 $f14, 24($sp)
+ sdc1 $f12, 16($sp)
sw $a3, 12($sp)
.cfi_rel_offset 7, 12
sw $a2, 8($sp)
@@ -45,8 +46,9 @@ ENTRY art_jni_dlsym_lookup_stub
lw $a1, 4($sp)
lw $a2, 8($sp)
lw $a3, 12($sp)
- LDu $f12, $f13, 16, $sp, $t0
- LDu $f14, $f15, 24, $sp, $t0
+ CHECK_ALIGNMENT $sp, $t0
+ ldc1 $f12, 16($sp)
+ ldc1 $f14, 24($sp)
lw $ra, 32($sp)
beq $v0, $zero, .Lno_native_code_found
addiu $sp, $sp, 48 # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index f6204bd8b6..489c52c0d2 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -37,45 +37,49 @@
* Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- addiu $sp, $sp, -96
- .cfi_adjust_cfa_offset 96
+ addiu $sp, $sp, -112
+ .cfi_adjust_cfa_offset 112
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 96)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 112)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
#endif
- sw $ra, 92($sp)
- .cfi_rel_offset 31, 92
- sw $s8, 88($sp)
- .cfi_rel_offset 30, 88
- sw $gp, 84($sp)
- .cfi_rel_offset 28, 84
- sw $s7, 80($sp)
- .cfi_rel_offset 23, 80
- sw $s6, 76($sp)
- .cfi_rel_offset 22, 76
- sw $s5, 72($sp)
- .cfi_rel_offset 21, 72
- sw $s4, 68($sp)
- .cfi_rel_offset 20, 68
- sw $s3, 64($sp)
- .cfi_rel_offset 19, 64
- sw $s2, 60($sp)
- .cfi_rel_offset 18, 60
- sw $s1, 56($sp)
- .cfi_rel_offset 17, 56
- sw $s0, 52($sp)
- .cfi_rel_offset 16, 52
-
- SDu $f30, $f31, 44, $sp, $t1
- SDu $f28, $f29, 36, $sp, $t1
- SDu $f26, $f27, 28, $sp, $t1
- SDu $f24, $f25, 20, $sp, $t1
- SDu $f22, $f23, 12, $sp, $t1
- SDu $f20, $f21, 4, $sp, $t1
-
- # 1 word for holding Method*
+ sw $ra, 108($sp)
+ .cfi_rel_offset 31, 108
+ sw $s8, 104($sp)
+ .cfi_rel_offset 30, 104
+ sw $gp, 100($sp)
+ .cfi_rel_offset 28, 100
+ sw $s7, 96($sp)
+ .cfi_rel_offset 23, 96
+ sw $s6, 92($sp)
+ .cfi_rel_offset 22, 92
+ sw $s5, 88($sp)
+ .cfi_rel_offset 21, 88
+ sw $s4, 84($sp)
+ .cfi_rel_offset 20, 84
+ sw $s3, 80($sp)
+ .cfi_rel_offset 19, 80
+ sw $s2, 76($sp)
+ .cfi_rel_offset 18, 76
+ sw $s1, 72($sp)
+ .cfi_rel_offset 17, 72
+ sw $s0, 68($sp)
+ .cfi_rel_offset 16, 68
+ // 4-byte placeholder for register $zero, serving for alignment
+ // of the following double precision floating point registers.
+
+ CHECK_ALIGNMENT $sp, $t1
+ sdc1 $f30, 56($sp)
+ sdc1 $f28, 48($sp)
+ sdc1 $f26, 40($sp)
+ sdc1 $f24, 32($sp)
+ sdc1 $f22, 24($sp)
+ sdc1 $f20, 16($sp)
+
+ # 1 word for holding Method* plus 12 bytes padding to keep contents of SP
+ # a multiple of 16.
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
@@ -216,12 +220,13 @@
.cfi_rel_offset 6, 60
sw $a1, 56($sp)
.cfi_rel_offset 5, 56
- SDu $f18, $f19, 48, $sp, $t8
- SDu $f16, $f17, 40, $sp, $t8
- SDu $f14, $f15, 32, $sp, $t8
- SDu $f12, $f13, 24, $sp, $t8
- SDu $f10, $f11, 16, $sp, $t8
- SDu $f8, $f9, 8, $sp, $t8
+ CHECK_ALIGNMENT $sp, $t8
+ sdc1 $f18, 48($sp)
+ sdc1 $f16, 40($sp)
+ sdc1 $f14, 32($sp)
+ sdc1 $f12, 24($sp)
+ sdc1 $f10, 16($sp)
+ sdc1 $f8, 8($sp)
# bottom will hold Method*
.endm
@@ -320,12 +325,13 @@
lw $a2, 60($sp)
.cfi_restore 6
RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
- LDu $f18, $f19, 48, $sp, $t8
- LDu $f16, $f17, 40, $sp, $t8
- LDu $f14, $f15, 32, $sp, $t8
- LDu $f12, $f13, 24, $sp, $t8
- LDu $f10, $f11, 16, $sp, $t8
- LDu $f8, $f9, 8, $sp, $t8
+ CHECK_ALIGNMENT $sp, $t8
+ ldc1 $f18, 48($sp)
+ ldc1 $f16, 40($sp)
+ ldc1 $f14, 32($sp)
+ ldc1 $f12, 24($sp)
+ ldc1 $f10, 16($sp)
+ ldc1 $f8, 8($sp)
addiu $sp, $sp, 112 # Pop frame.
.cfi_adjust_cfa_offset -112
.endm
@@ -412,22 +418,23 @@
1:
.cpload $ra
- SDu $f30, $f31, 136, $sp, $t1
- SDu $f28, $f29, 128, $sp, $t1
- SDu $f26, $f27, 120, $sp, $t1
- SDu $f24, $f25, 112, $sp, $t1
- SDu $f22, $f23, 104, $sp, $t1
- SDu $f20, $f21, 96, $sp, $t1
- SDu $f18, $f19, 88, $sp, $t1
- SDu $f16, $f17, 80, $sp, $t1
- SDu $f14, $f15, 72, $sp, $t1
- SDu $f12, $f13, 64, $sp, $t1
- SDu $f10, $f11, 56, $sp, $t1
- SDu $f8, $f9, 48, $sp, $t1
- SDu $f6, $f7, 40, $sp, $t1
- SDu $f4, $f5, 32, $sp, $t1
- SDu $f2, $f3, 24, $sp, $t1
- SDu $f0, $f1, 16, $sp, $t1
+ CHECK_ALIGNMENT $sp, $t1
+ sdc1 $f30, 136($sp)
+ sdc1 $f28, 128($sp)
+ sdc1 $f26, 120($sp)
+ sdc1 $f24, 112($sp)
+ sdc1 $f22, 104($sp)
+ sdc1 $f20, 96($sp)
+ sdc1 $f18, 88($sp)
+ sdc1 $f16, 80($sp)
+ sdc1 $f14, 72($sp)
+ sdc1 $f12, 64($sp)
+ sdc1 $f10, 56($sp)
+ sdc1 $f8, 48($sp)
+ sdc1 $f6, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f2, 24($sp)
+ sdc1 $f0, 16($sp)
# 3 words padding and 1 word for holding Method*
@@ -460,22 +467,23 @@
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
- LDu $f30, $f31, 136, $sp, $t1
- LDu $f28, $f29, 128, $sp, $t1
- LDu $f26, $f27, 120, $sp, $t1
- LDu $f24, $f25, 112, $sp, $t1
- LDu $f22, $f23, 104, $sp, $t1
- LDu $f20, $f21, 96, $sp, $t1
- LDu $f18, $f19, 88, $sp, $t1
- LDu $f16, $f17, 80, $sp, $t1
- LDu $f14, $f15, 72, $sp, $t1
- LDu $f12, $f13, 64, $sp, $t1
- LDu $f10, $f11, 56, $sp, $t1
- LDu $f8, $f9, 48, $sp, $t1
- LDu $f6, $f7, 40, $sp, $t1
- LDu $f4, $f5, 32, $sp, $t1
- LDu $f2, $f3, 24, $sp, $t1
- LDu $f0, $f1, 16, $sp, $t1
+ CHECK_ALIGNMENT $sp, $t1
+ ldc1 $f30, 136($sp)
+ ldc1 $f28, 128($sp)
+ ldc1 $f26, 120($sp)
+ ldc1 $f24, 112($sp)
+ ldc1 $f22, 104($sp)
+ ldc1 $f20, 96($sp)
+ ldc1 $f18, 88($sp)
+ ldc1 $f16, 80($sp)
+ ldc1 $f14, 72($sp)
+ ldc1 $f12, 64($sp)
+ ldc1 $f10, 56($sp)
+ ldc1 $f8, 48($sp)
+ ldc1 $f6, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f2, 24($sp)
+ ldc1 $f0, 16($sp)
lw $ra, 252($sp)
.cfi_restore 31
@@ -665,7 +673,8 @@ ENTRY art_quick_osr_stub
b .Losr_exit
sw $v1, 4($a2) # store v0/v1 into result
.Losr_fp_result:
- SDu $f0, $f1, 0, $a2, $t0 # store f0/f1 into result
+ CHECK_ALIGNMENT $a2, $t0, 8
+ sdc1 $f0, 0($a2) # store f0/f1 into result
.Losr_exit:
lw $ra, 44($sp)
.cfi_restore 31
@@ -701,26 +710,28 @@ ENTRY art_quick_osr_stub
END art_quick_osr_stub
/*
- * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
+ * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_.
+ * Note that fprs_ is expected to be an address that is a multiple of 8.
* FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
*/
ENTRY art_quick_do_long_jump
- LDu $f0, $f1, 0*8, $a1, $t1
- LDu $f2, $f3, 1*8, $a1, $t1
- LDu $f4, $f5, 2*8, $a1, $t1
- LDu $f6, $f7, 3*8, $a1, $t1
- LDu $f8, $f9, 4*8, $a1, $t1
- LDu $f10, $f11, 5*8, $a1, $t1
- LDu $f12, $f13, 6*8, $a1, $t1
- LDu $f14, $f15, 7*8, $a1, $t1
- LDu $f16, $f17, 8*8, $a1, $t1
- LDu $f18, $f19, 9*8, $a1, $t1
- LDu $f20, $f21, 10*8, $a1, $t1
- LDu $f22, $f23, 11*8, $a1, $t1
- LDu $f24, $f25, 12*8, $a1, $t1
- LDu $f26, $f27, 13*8, $a1, $t1
- LDu $f28, $f29, 14*8, $a1, $t1
- LDu $f30, $f31, 15*8, $a1, $t1
+ CHECK_ALIGNMENT $a1, $t1, 8
+ ldc1 $f0, 0*8($a1)
+ ldc1 $f2, 1*8($a1)
+ ldc1 $f4, 2*8($a1)
+ ldc1 $f6, 3*8($a1)
+ ldc1 $f8, 4*8($a1)
+ ldc1 $f10, 5*8($a1)
+ ldc1 $f12, 6*8($a1)
+ ldc1 $f14, 7*8($a1)
+ ldc1 $f16, 8*8($a1)
+ ldc1 $f18, 9*8($a1)
+ ldc1 $f20, 10*8($a1)
+ ldc1 $f22, 11*8($a1)
+ ldc1 $f24, 12*8($a1)
+ ldc1 $f26, 13*8($a1)
+ ldc1 $f28, 14*8($a1)
+ ldc1 $f30, 15*8($a1)
.set push
.set nomacro
@@ -1067,7 +1078,8 @@ loopEnd:
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result
5:
- SDu $f0, $f1, 0, $t0, $t1 # store floating point result
+ CHECK_ALIGNMENT $t0, $t1, 8
+ sdc1 $f0, 0($t0) # store floating point result
jalr $zero, $ra
nop
@@ -1225,7 +1237,8 @@ loopEndS:
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result
6:
- SDu $f0, $f1, 0, $t0, $t1 # store floating point result
+ CHECK_ALIGNMENT $t0, $t1, 8
+ sdc1 $f0, 0($t0) # store floating point result
jalr $zero, $ra
nop
@@ -2252,7 +2265,7 @@ ENTRY art_quick_generic_jni_trampoline
move $a0, rSELF # pass Thread::Current
move $a2, $v0 # pass result
move $a3, $v1
- addiu $sp, $sp, -24 # reserve arg slots
+ addiu $sp, $sp, -32 # reserve arg slots
la $t9, artQuickGenericJniEndTrampoline
jalr $t9
s.d $f0, 16($sp) # pass result_f
@@ -3215,7 +3228,7 @@ ENTRY art_quick_invoke_polymorphic
sw $zero, 20($sp) # Initialize JValue result.
sw $zero, 16($sp)
la $t9, artInvokePolymorphic
- jalr $t9 # (result, receiver, Thread*, context)
+ jalr $t9 # artInvokePolymorphic(result, receiver, Thread*, context)
addiu $a0, $sp, 16 # Make $a0 a pointer to the JValue result
.macro MATCH_RETURN_TYPE c, handler
li $t0, \c
@@ -3243,7 +3256,8 @@ ENTRY art_quick_invoke_polymorphic
lhu $v0, 16($sp) # Move char from JValue result to return value register.
.Lstore_double_result:
.Lstore_float_result:
- LDu $f0, $f1, 16, $sp, $t0 # Move double/float from JValue result to return value register.
+ CHECK_ALIGNMENT $sp, $t0
+ ldc1 $f0, 16($sp) # Move double/float from JValue result to return value register.
b .Lcleanup_and_return
nop
.Lstore_long_result:
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 45a21ab942..8c86252152 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -35,8 +35,24 @@ static constexpr uint32_t kMipsCalleeSaveRefSpills =
static constexpr uint32_t kMipsCalleeSaveArgSpills =
(1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) | (1 << art::mips::T0) |
(1 << art::mips::T1);
+// We want to save all floating point register pairs at addresses
+// which are multiples of 8 so that we can eliminate use of the
+// SDu/LDu macros by using sdc1/ldc1 to store/load floating
+// register values using a single instruction. Because integer
+// registers are stored at the top of the frame, to achieve having
+// the floating point register pairs aligned on multiples of 8 the
+// number of integer registers saved must be even. Previously, the
+// only case in which we saved floating point registers beneath an
+// odd number of integer registers was when "type" is
+// CalleeSaveType::kSaveAllCalleeSaves. (There are other cases in
+// which an odd number of integer registers are saved but those
+// cases don't save any floating point registers. If no floating
+// point registers are saved we don't care if the number of integer
+// registers saved is odd or even). To save an even number of
+// integer registers in this particular case we add the ZERO
+// register to the list of registers which get saved.
static constexpr uint32_t kMipsCalleeSaveAllSpills =
- (1 << art::mips::S0) | (1 << art::mips::S1);
+ (1 << art::mips::ZERO) | (1 << art::mips::S0) | (1 << art::mips::S1);
static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
(1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
(1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 9d77ebcd22..bdce520937 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -52,7 +52,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
// get the method from the top of the stack. However it's in r0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips64));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
} else {
@@ -126,7 +126,7 @@ bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, voi
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips64);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips64);
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 08d0bac2c3..ea9f84bd2d 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -89,7 +89,7 @@ Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
}
bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kMips64 != other->GetInstructionSet()) {
+ if (InstructionSet::kMips64 != other->GetInstructionSet()) {
return false;
}
const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures();
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index d9f30c755e..27e544ed91 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -51,7 +51,7 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
InstructionSet GetInstructionSet() const OVERRIDE {
- return kMips64;
+ return InstructionSet::kMips64;
}
uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 0ba0bd4c15..933dc668a7 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -23,9 +23,9 @@ namespace art {
TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips64_features(
- InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64);
+ EXPECT_EQ(mips64_features->GetInstructionSet(), InstructionSet::kMips64);
EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str());
EXPECT_EQ(mips64_features->AsBitmap(), 1U);
@@ -34,15 +34,15 @@ TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
- InstructionSetFeatures::FromVariant(kMips64, "mips64r6", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "mips64r6", &error_msg));
ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
- EXPECT_EQ(mips64r6_features->GetInstructionSet(), kMips64);
+ EXPECT_EQ(mips64r6_features->GetInstructionSet(), InstructionSet::kMips64);
EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
- InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index d4ad275f35..98ffe6504a 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -3028,7 +3028,7 @@ ENTRY art_quick_invoke_polymorphic
daddiu $sp, $sp, -8 # Reserve space for JValue result.
.cfi_adjust_cfa_offset 8
sd $zero, 0($sp) # Initialize JValue result.
- jal artInvokePolymorphic # (result, receiver, Thread*, context)
+ jal artInvokePolymorphic # artInvokePolymorphic(result, receiver, Thread*, context)
move $a0, $sp # Make $a0 a pointer to the JValue result
.macro MATCH_RETURN_TYPE c, handler
li $t0, \c
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 349ce3bbe1..527332fe9a 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -291,9 +291,9 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
#if defined(__x86_64__)
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86_64));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kX86_64));
#else
- reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kX86));
#endif
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
@@ -445,9 +445,9 @@ bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
", fault_addr: " << fault_addr;
#if defined(__x86_64__)
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86_64);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86_64);
#else
- uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86);
#endif
// Check that the fault address is the value expected for a stack overflow.
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 56cb07ea50..57cf4b2741 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -54,7 +54,7 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
virtual InstructionSet GetInstructionSet() const OVERRIDE {
- return kX86;
+ return InstructionSet::kX86;
}
uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index c67b4ddfe0..33eac0f0a6 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -23,9 +23,9 @@ namespace art {
TEST(X86InstructionSetFeaturesTest, X86FeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_features->GetFeatureString().c_str());
@@ -36,9 +36,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
// Build features for a 32-bit x86 atom processor.
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "atom", &error_msg));
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_features->GetFeatureString().c_str());
@@ -46,9 +46,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
// Build features for a 32-bit x86 default processor.
std::unique_ptr<const InstructionSetFeatures> x86_default_features(
- InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_default_features->GetFeatureString().c_str());
@@ -56,9 +56,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
// Build features for a 64-bit x86-64 atom processor.
std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "atom", &error_msg));
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_64_features->GetFeatureString().c_str());
@@ -73,9 +73,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSandybridgeVariant) {
// Build features for a 32-bit x86 sandybridge processor.
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "sandybridge", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "sandybridge", &error_msg));
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
x86_features->GetFeatureString().c_str());
@@ -83,9 +83,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSandybridgeVariant) {
// Build features for a 32-bit x86 default processor.
std::unique_ptr<const InstructionSetFeatures> x86_default_features(
- InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_default_features->GetFeatureString().c_str());
@@ -93,9 +93,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSandybridgeVariant) {
// Build features for a 64-bit x86-64 sandybridge processor.
std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "sandybridge", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "sandybridge", &error_msg));
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
x86_64_features->GetFeatureString().c_str());
@@ -110,9 +110,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
// Build features for a 32-bit x86 silvermont processor.
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "silvermont", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "silvermont", &error_msg));
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
x86_features->GetFeatureString().c_str());
@@ -120,9 +120,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
// Build features for a 32-bit x86 default processor.
std::unique_ptr<const InstructionSetFeatures> x86_default_features(
- InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_default_features->GetFeatureString().c_str());
@@ -130,9 +130,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
// Build features for a 64-bit x86-64 silvermont processor.
std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "silvermont", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "silvermont", &error_msg));
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
x86_64_features->GetFeatureString().c_str());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index eecca58a41..25716dc1bb 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2419,7 +2419,7 @@ DEFINE_FUNCTION art_quick_invoke_polymorphic
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver (method handle)
PUSH eax // pass JResult
- call SYMBOL(artInvokePolymorphic) // (result, receiver, Thread*, SP)
+ call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
subl LITERAL('A'), %eax // Eliminate out of bounds options
cmpb LITERAL('Z' - 'A'), %al
ja .Lcleanup_and_return
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 83f4093682..e76490ba13 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -60,7 +60,7 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
}
InstructionSet GetInstructionSet() const OVERRIDE {
- return kX86_64;
+ return InstructionSet::kX86_64;
}
virtual ~X86_64InstructionSetFeatures() {}
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
index 3c2ceacc35..2b307daea9 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
@@ -23,9 +23,9 @@ namespace art {
TEST(X86_64InstructionSetFeaturesTest, X86Features) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "default", &error_msg));
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
x86_64_features->GetFeatureString().c_str());
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index fced9954a9..4a328e8d60 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -299,10 +299,26 @@ inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
}
-template <bool kResolve>
-inline ObjPtr<mirror::Class> ArtField::GetType() {
- // TODO: Refactor this function into two functions, ResolveType() and LookupType()
- // so that we can properly annotate it with no-suspension possible / suspension possible.
+inline ObjPtr<mirror::Class> ArtField::LookupType() {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ const uint32_t field_index = GetDexFieldIndex();
+ ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
+ if (UNLIKELY(declaring_class->IsProxyClass())) {
+ return ProxyFindSystemClass(GetTypeDescriptor());
+ }
+ ObjPtr<mirror::DexCache> dex_cache = declaring_class->GetDexCache();
+ const DexFile* const dex_file = dex_cache->GetDexFile();
+ dex::TypeIndex type_idx = dex_file->GetFieldId(field_index).type_idx_;
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
+ if (UNLIKELY(type == nullptr)) {
+ type = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *dex_file, type_idx, dex_cache, declaring_class->GetClassLoader());
+ DCHECK(!Thread::Current()->IsExceptionPending());
+ }
+ return type.Ptr();
+}
+
+inline ObjPtr<mirror::Class> ArtField::ResolveType() {
const uint32_t field_index = GetDexFieldIndex();
ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
@@ -310,18 +326,12 @@ inline ObjPtr<mirror::Class> ArtField::GetType() {
}
auto* dex_cache = declaring_class->GetDexCache();
const DexFile* const dex_file = dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
- ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_);
+ dex::TypeIndex type_idx = dex_file->GetFieldId(field_index).type_idx_;
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
if (UNLIKELY(type == nullptr)) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (kResolve) {
- type = class_linker->ResolveType(*dex_file, field_id.type_idx_, declaring_class);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
- } else {
- type = class_linker->LookupResolvedType(
- *dex_file, field_id.type_idx_, dex_cache, declaring_class->GetClassLoader());
- DCHECK(!Thread::Current()->IsExceptionPending());
- }
+ type = class_linker->ResolveType(*dex_file, type_idx, declaring_class);
+ DCHECK_EQ(type == nullptr, Thread::Current()->IsExceptionPending());
}
return type;
}
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 5114578933..866bf0bc70 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -205,8 +205,8 @@ class ArtField FINAL {
bool IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
- template <bool kResolve>
- ObjPtr<mirror::Class> GetType() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Class> LookupType() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Class> ResolveType() REQUIRES_SHARED(Locks::mutator_lock_);
size_t FieldSize() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 4181169f5d..12b4d16b37 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -103,6 +103,7 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
}
inline ObjPtr<mirror::Class> ArtMethod::LookupResolvedClassFromTypeIndex(dex::TypeIndex type_idx) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ObjPtr<mirror::DexCache> dex_cache = GetDexCache();
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
if (UNLIKELY(type == nullptr)) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index d4297df76f..b5e0f66575 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -166,6 +166,8 @@ InvokeType ArtMethod::GetInvokeType() {
return kInterface;
} else if (IsDirect()) {
return kDirect;
+ } else if (IsPolymorphicSignature()) {
+ return kPolymorphic;
} else {
return kVirtual;
}
@@ -415,6 +417,17 @@ bool ArtMethod::IsOverridableByDefaultMethod() {
return GetDeclaringClass()->IsInterface();
}
+bool ArtMethod::IsPolymorphicSignature() {
+ // Methods with a polymorphic signature have constraints that they
+ // are native and varargs and belong to either MethodHandle or VarHandle.
+ if (!IsNative() || !IsVarargs()) {
+ return false;
+ }
+ mirror::Class* cls = GetDeclaringClass();
+ return (cls == WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle) ||
+ cls == WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_VarHandle));
+}
+
bool ArtMethod::IsAnnotatedWithFastNative() {
return IsAnnotatedWith(WellKnownClasses::dalvik_annotation_optimization_FastNative,
DexFile::kDexVisibilityBuild,
diff --git a/runtime/art_method.h b/runtime/art_method.h
index caef81c601..ca2e34e071 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -271,6 +271,8 @@ class ArtMethod FINAL {
bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
+
bool SkipAccessChecks() {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e18f11080f..3cf2b93690 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -110,9 +110,9 @@ ADD_TEST_EQ(SHADOWFRAME_RESULT_REGISTER_OFFSET,
#define SHADOWFRAME_DEX_PC_PTR_OFFSET (SHADOWFRAME_LINK_OFFSET + 3 * __SIZEOF_POINTER__)
ADD_TEST_EQ(SHADOWFRAME_DEX_PC_PTR_OFFSET,
static_cast<int32_t>(art::ShadowFrame::DexPCPtrOffset()))
-#define SHADOWFRAME_CODE_ITEM_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_CODE_ITEM_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::CodeItemOffset()))
+#define SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::DexInstructionsOffset()))
#define SHADOWFRAME_LOCK_COUNT_DATA_OFFSET (SHADOWFRAME_LINK_OFFSET + 5 * __SIZEOF_POINTER__)
ADD_TEST_EQ(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
static_cast<int32_t>(art::ShadowFrame::LockCountDataOffset()))
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 09eae40a6b..d8621cc2e6 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -47,7 +47,7 @@ class Mutex;
class QuasiAtomic {
static constexpr bool NeedSwapMutexes(InstructionSet isa) {
// TODO - mips64 still need this for Cas64 ???
- return (isa == kMips) || (isa == kMips64);
+ return (isa == InstructionSet::kMips) || (isa == InstructionSet::kMips64);
}
public:
diff --git a/runtime/base/bit_vector-inl.h b/runtime/base/bit_vector-inl.h
index 08877987b1..0e67f77e19 100644
--- a/runtime/base/bit_vector-inl.h
+++ b/runtime/base/bit_vector-inl.h
@@ -65,6 +65,24 @@ inline uint32_t BitVector::IndexIterator::FindIndex(uint32_t start_index) const
return word_index * 32u + CTZ(word);
}
+inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, begin_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(FindIndex(0u)) { }
+
+inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, end_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(BitSize()) { }
+
+inline BitVector::IndexIterator BitVector::IndexContainer::begin() const {
+ return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+}
+
+inline BitVector::IndexIterator BitVector::IndexContainer::end() const {
+ return IndexIterator(bit_vector_, IndexIterator::end_tag());
+}
+
inline void BitVector::ClearAllBits() {
memset(storage_, 0, storage_size_ * kWordBytes);
}
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 56090672ce..564092a1a2 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -70,15 +70,8 @@ class BitVector {
struct begin_tag { };
struct end_tag { };
- IndexIterator(const BitVector* bit_vector, begin_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(FindIndex(0u)) { }
-
- IndexIterator(const BitVector* bit_vector, end_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(BitSize()) { }
+ IndexIterator(const BitVector* bit_vector, begin_tag);
+ IndexIterator(const BitVector* bit_vector, end_tag);
uint32_t BitSize() const {
return storage_size_ * kWordBits;
@@ -99,13 +92,8 @@ class BitVector {
public:
explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
- IndexIterator begin() const {
- return IndexIterator(bit_vector_, IndexIterator::begin_tag());
- }
-
- IndexIterator end() const {
- return IndexIterator(bit_vector_, IndexIterator::end_tag());
- }
+ IndexIterator begin() const;
+ IndexIterator end() const;
private:
const BitVector* const bit_vector_;
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index e19aecb712..886065db30 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -94,11 +94,19 @@ class DebugStackReferenceImpl {
DebugStackReferenceImpl(const DebugStackReferenceImpl& other)
: counter_(other.counter_), ref_count_(counter_->IncrementRefCount()) {
}
+ DebugStackReferenceImpl(DebugStackReferenceImpl&& other)
+ : counter_(other.counter_), ref_count_(other.ref_count_) {
+ other.counter_ = nullptr;
+ }
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) {
CHECK(counter_ == other.counter_);
return *this;
}
- ~DebugStackReferenceImpl() { counter_->DecrementRefCount(); }
+ ~DebugStackReferenceImpl() {
+ if (counter_ != nullptr) {
+ counter_->DecrementRefCount();
+ }
+ }
void CheckTop() { CHECK_EQ(counter_->GetRefCount(), ref_count_); }
private:
diff --git a/runtime/base/file_magic.cc b/runtime/base/file_magic.cc
index 30b4f0559d..dffb9b43a1 100644
--- a/runtime/base/file_magic.cc
+++ b/runtime/base/file_magic.cc
@@ -37,19 +37,26 @@ File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
return File();
}
- int n = TEMP_FAILURE_RETRY(read(fd.Fd(), magic, sizeof(*magic)));
- if (n != sizeof(*magic)) {
- *error_msg = StringPrintf("Failed to find magic in '%s'", filename);
- return File();
- }
- if (lseek(fd.Fd(), 0, SEEK_SET) != 0) {
- *error_msg = StringPrintf("Failed to seek to beginning of file '%s' : %s", filename,
- strerror(errno));
+ if (!ReadMagicAndReset(fd.Fd(), magic, error_msg)) {
+ *error_msg = StringPrintf("Error in reading magic from file %s: %s", filename, error_msg->c_str());
return File();
}
return fd;
}
+bool ReadMagicAndReset(int fd, uint32_t* magic, std::string* error_msg) {
+ int n = TEMP_FAILURE_RETRY(read(fd, magic, sizeof(*magic)));
+ if (n != sizeof(*magic)) {
+ *error_msg = StringPrintf("Failed to find magic");
+ return false;
+ }
+ if (lseek(fd, 0, SEEK_SET) != 0) {
+ *error_msg = StringPrintf("Failed to seek to beginning of file : %s", strerror(errno));
+ return false;
+ }
+ return true;
+}
+
bool IsZipMagic(uint32_t magic) {
return (('P' == ((magic >> 0) & 0xff)) &&
('K' == ((magic >> 8) & 0xff)));
diff --git a/runtime/base/file_magic.h b/runtime/base/file_magic.h
index 1c9effdb50..e7bd706a5c 100644
--- a/runtime/base/file_magic.h
+++ b/runtime/base/file_magic.h
@@ -27,6 +27,9 @@ namespace art {
// Open file and read magic number
File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg);
+// Read magic number and reset pointer to SEEK_SET.
+bool ReadMagicAndReset(int fd, uint32_t* magic, std::string* error_msg);
+
// Check whether the given magic matches a known file type.
bool IsZipMagic(uint32_t magic);
diff --git a/runtime/base/file_utils.cc b/runtime/base/file_utils.cc
new file mode 100644
index 0000000000..323a06519d
--- /dev/null
+++ b/runtime/base/file_utils.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "file_utils.h"
+
+#include <inttypes.h>
+#include <pthread.h>
+#include <sys/mman.h> // For madvise
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+// We need dladdr.
+#ifndef __APPLE__
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#define DEFINED_GNU_SOURCE
+#endif
+#include <dlfcn.h>
+#include <libgen.h>
+#ifdef DEFINED_GNU_SOURCE
+#undef _GNU_SOURCE
+#undef DEFINED_GNU_SOURCE
+#endif
+#endif
+
+
+#include <memory>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "base/stl_util.h"
+#include "base/unix_file/fd_file.h"
+#include "dex_file-inl.h"
+#include "dex_file_loader.h"
+#include "dex_instruction.h"
+#include "oat_quick_method_header.h"
+#include "os.h"
+#include "scoped_thread_state_change-inl.h"
+#include "utf-inl.h"
+
+#if defined(__APPLE__)
+#include <crt_externs.h>
+#include <sys/syscall.h>
+#include "AvailabilityMacros.h" // For MAC_OS_X_VERSION_MAX_ALLOWED
+#endif
+
+#if defined(__linux__)
+#include <linux/unistd.h>
+#endif
+
+namespace art {
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+bool ReadFileToString(const std::string& file_name, std::string* result) {
+ File file(file_name, O_RDONLY, false);
+ if (!file.IsOpened()) {
+ return false;
+ }
+
+ std::vector<char> buf(8 * KB);
+ while (true) {
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
+ if (n == -1) {
+ return false;
+ }
+ if (n == 0) {
+ return true;
+ }
+ result->append(&buf[0], n);
+ }
+}
+
+bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
+ File file(file_name, O_RDONLY, false);
+ if (!file.IsOpened()) {
+ return false;
+ }
+
+ constexpr size_t kBufSize = 256; // Small buffer. Avoid stack overflow and stack size warnings.
+ char buf[kBufSize + 1]; // +1 for terminator.
+ size_t filled_to = 0;
+ while (true) {
+ DCHECK_LT(filled_to, kBufSize);
+ int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[filled_to], kBufSize - filled_to));
+ if (n <= 0) {
+ // Print the rest of the buffer, if it exists.
+ if (filled_to > 0) {
+ buf[filled_to] = 0;
+ LOG(level) << buf;
+ }
+ return n == 0;
+ }
+ // Scan for '\n'.
+ size_t i = filled_to;
+ bool found_newline = false;
+ for (; i < filled_to + n; ++i) {
+ if (buf[i] == '\n') {
+ // Found a line break, that's something to print now.
+ buf[i] = 0;
+ LOG(level) << buf;
+ // Copy the rest to the front.
+ if (i + 1 < filled_to + n) {
+ memmove(&buf[0], &buf[i + 1], filled_to + n - i - 1);
+ filled_to = filled_to + n - i - 1;
+ } else {
+ filled_to = 0;
+ }
+ found_newline = true;
+ break;
+ }
+ }
+ if (found_newline) {
+ continue;
+ } else {
+ filled_to += n;
+ // Check if we must flush now.
+ if (filled_to == kBufSize) {
+ buf[kBufSize] = 0;
+ LOG(level) << buf;
+ filled_to = 0;
+ }
+ }
+ }
+}
+
+std::string GetAndroidRootSafe(std::string* error_msg) {
+ // Prefer ANDROID_ROOT if it's set.
+ const char* android_dir = getenv("ANDROID_ROOT");
+ if (android_dir != nullptr) {
+ if (!OS::DirectoryExists(android_dir)) {
+ *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_dir);
+ return "";
+ }
+ return android_dir;
+ }
+
+ // Check where libart is from, and derive from there. Only do this for non-Mac.
+#ifndef __APPLE__
+ {
+ Dl_info info;
+ if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
+ // Make a duplicate of the fname so dirname can modify it.
+ UniqueCPtr<char> fname(strdup(info.dli_fname));
+
+ char* dir1 = dirname(fname.get()); // This is the lib directory.
+ char* dir2 = dirname(dir1); // This is the "system" directory.
+ if (OS::DirectoryExists(dir2)) {
+ std::string tmp = dir2; // Make a copy here so that fname can be released.
+ return tmp;
+ }
+ }
+ }
+#endif
+
+ // Try "/system".
+ if (!OS::DirectoryExists("/system")) {
+ *error_msg = "Failed to find ANDROID_ROOT directory /system";
+ return "";
+ }
+ return "/system";
+}
+
+std::string GetAndroidRoot() {
+ std::string error_msg;
+ std::string ret = GetAndroidRootSafe(&error_msg);
+ if (ret.empty()) {
+ LOG(FATAL) << error_msg;
+ UNREACHABLE();
+ }
+ return ret;
+}
+
+
+static const char* GetAndroidDirSafe(const char* env_var,
+ const char* default_dir,
+ std::string* error_msg) {
+ const char* android_dir = getenv(env_var);
+ if (android_dir == nullptr) {
+ if (OS::DirectoryExists(default_dir)) {
+ android_dir = default_dir;
+ } else {
+ *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
+ return nullptr;
+ }
+ }
+ if (!OS::DirectoryExists(android_dir)) {
+ *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
+ return nullptr;
+ }
+ return android_dir;
+}
+
+static const char* GetAndroidDir(const char* env_var, const char* default_dir) {
+ std::string error_msg;
+ const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
+ if (dir != nullptr) {
+ return dir;
+ } else {
+ LOG(FATAL) << error_msg;
+ return nullptr;
+ }
+}
+
+const char* GetAndroidData() {
+ return GetAndroidDir("ANDROID_DATA", "/data");
+}
+
+const char* GetAndroidDataSafe(std::string* error_msg) {
+ return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
+}
+
+std::string GetDefaultBootImageLocation(std::string* error_msg) {
+ std::string android_root = GetAndroidRootSafe(error_msg);
+ if (android_root.empty()) {
+ return "";
+ }
+ return StringPrintf("%s/framework/boot.art", android_root.c_str());
+}
+
+void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
+ CHECK(subdir != nullptr);
+ std::string error_msg;
+ const char* android_data = GetAndroidDataSafe(&error_msg);
+ if (android_data == nullptr) {
+ *have_android_data = false;
+ *dalvik_cache_exists = false;
+ *is_global_cache = false;
+ return;
+ } else {
+ *have_android_data = true;
+ }
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ *dalvik_cache = dalvik_cache_root + subdir;
+ *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
+ *is_global_cache = strcmp(android_data, "/data") == 0;
+ if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
+ // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
+ *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
+ (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
+ }
+}
+
+std::string GetDalvikCache(const char* subdir) {
+ CHECK(subdir != nullptr);
+ const char* android_data = GetAndroidData();
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ const std::string dalvik_cache = dalvik_cache_root + subdir;
+ if (!OS::DirectoryExists(dalvik_cache.c_str())) {
+ // TODO: Check callers. Traditional behavior is to not abort.
+ return "";
+ }
+ return dalvik_cache;
+}
+
+bool GetDalvikCacheFilename(const char* location, const char* cache_location,
+ std::string* filename, std::string* error_msg) {
+ if (location[0] != '/') {
+ *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
+ return false;
+ }
+ std::string cache_file(&location[1]); // skip leading slash
+ if (!android::base::EndsWith(location, ".dex") &&
+ !android::base::EndsWith(location, ".art") &&
+ !android::base::EndsWith(location, ".oat")) {
+ cache_file += "/";
+ cache_file += DexFileLoader::kClassesDex;
+ }
+ std::replace(cache_file.begin(), cache_file.end(), '/', '@');
+ *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ return true;
+}
+
+std::string GetVdexFilename(const std::string& oat_location) {
+ return ReplaceFileExtension(oat_location, "vdex");
+}
+
+static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
+ // in = /foo/bar/baz
+ // out = /foo/bar/<isa>/baz
+ size_t pos = filename->rfind('/');
+ CHECK_NE(pos, std::string::npos) << *filename << " " << isa;
+ filename->insert(pos, "/", 1);
+ filename->insert(pos + 1, GetInstructionSetString(isa));
+}
+
+std::string GetSystemImageFilename(const char* location, const InstructionSet isa) {
+ // location = /system/framework/boot.art
+ // filename = /system/framework/<isa>/boot.art
+ std::string filename(location);
+ InsertIsaDirectory(isa, &filename);
+ return filename;
+}
+
+bool FileExists(const std::string& filename) {
+ struct stat buffer;
+ return stat(filename.c_str(), &buffer) == 0;
+}
+
+bool FileExistsAndNotEmpty(const std::string& filename) {
+ struct stat buffer;
+ if (stat(filename.c_str(), &buffer) != 0) {
+ return false;
+ }
+ return buffer.st_size > 0;
+}
+
+std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
+ const size_t last_ext = filename.find_last_of('.');
+ if (last_ext == std::string::npos) {
+ return filename + "." + new_extension;
+ } else {
+ return filename.substr(0, last_ext + 1) + new_extension;
+ }
+}
+
+int64_t GetFileSizeBytes(const std::string& filename) {
+ struct stat stat_buf;
+ int rc = stat(filename.c_str(), &stat_buf);
+ return rc == 0 ? stat_buf.st_size : -1;
+}
+
+int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice) {
+ DCHECK_LE(begin, end);
+ begin = AlignUp(begin, kPageSize);
+ end = AlignDown(end, kPageSize);
+ if (begin < end) {
+ int result = madvise(const_cast<uint8_t*>(begin), end - begin, advice);
+ if (result != 0) {
+ PLOG(WARNING) << "madvise failed " << result;
+ }
+ return result;
+ }
+ return 0;
+}
+
+} // namespace art
diff --git a/runtime/base/file_utils.h b/runtime/base/file_utils.h
new file mode 100644
index 0000000000..007f3b443d
--- /dev/null
+++ b/runtime/base/file_utils.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_FILE_UTILS_H_
+#define ART_RUNTIME_BASE_FILE_UTILS_H_
+
+#include <stdlib.h>
+
+#include <string>
+
+#include "arch/instruction_set.h"
+#include "base/logging.h"
+
+namespace art {
+
+bool ReadFileToString(const std::string& file_name, std::string* result);
+bool PrintFileToLog(const std::string& file_name, LogSeverity level);
+
+// Find $ANDROID_ROOT, /system, or abort.
+std::string GetAndroidRoot();
+// Find $ANDROID_ROOT, /system, or return an empty string.
+std::string GetAndroidRootSafe(std::string* error_msg);
+
+// Find $ANDROID_DATA, /data, or abort.
+const char* GetAndroidData();
+// Find $ANDROID_DATA, /data, or return null.
+const char* GetAndroidDataSafe(std::string* error_msg);
+
+// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
+// Returns an empty string if ANDROID_ROOT is not set.
+std::string GetDefaultBootImageLocation(std::string* error_msg);
+
+// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
+// could not be found.
+std::string GetDalvikCache(const char* subdir);
+// Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
+// have_android_data will be set to true if we have an ANDROID_DATA that exists,
+// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
+// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
+void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
+
+// Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
+// rooted at cache_location.
+bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
+ std::string* filename, std::string* error_msg);
+
+// Returns the system location for an image
+std::string GetSystemImageFilename(const char* location, InstructionSet isa);
+
+// Returns the vdex filename for the given oat filename.
+std::string GetVdexFilename(const std::string& oat_filename);
+
+// Returns true if the file exists.
+bool FileExists(const std::string& filename);
+bool FileExistsAndNotEmpty(const std::string& filename);
+
+// Returns `filename` with the text after the last occurrence of '.' replaced with
+// `extension`. If `filename` does not contain a period, returns a string containing `filename`,
+// a period, and `new_extension`.
+// Example: ReplaceFileExtension("foo.bar", "abc") == "foo.abc"
+// ReplaceFileExtension("foo", "abc") == "foo.abc"
+std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
+
+// Return the file size in bytes or -1 if the file does not exists.
+int64_t GetFileSizeBytes(const std::string& filename);
+
+// Madvise the largest page aligned region within begin and end.
+int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_FILE_UTILS_H_
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index abd5166b9c..877f052006 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1022,7 +1022,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
void Locks::Init() {
if (logging_lock_ != nullptr) {
// Already initialized.
- if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+ if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
DCHECK(modify_ldt_lock_ != nullptr);
} else {
DCHECK(modify_ldt_lock_ == nullptr);
@@ -1132,7 +1132,7 @@ void Locks::Init() {
DCHECK(allocated_thread_ids_lock_ == nullptr);
allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
- if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+ if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
DCHECK(modify_ldt_lock_ == nullptr);
modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index caca68d42e..87c4afe96f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -671,7 +671,7 @@ class Locks {
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
- #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
diff --git a/runtime/base/safe_copy.cc b/runtime/base/safe_copy.cc
index c76ea113d8..b46b921307 100644
--- a/runtime/base/safe_copy.cc
+++ b/runtime/base/safe_copy.cc
@@ -24,7 +24,7 @@
#include <android-base/macros.h>
-#include "runtime/base/bit_utils.h"
+#include "bit_utils.h"
namespace art {
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 973f9b93ed..7240842d55 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -48,8 +48,7 @@ void ArenaStack::Reset() {
MemStats ArenaStack::GetPeakStats() const {
DebugStackRefCounter::CheckNoRefs();
- return MemStats("ArenaStack peak", static_cast<const TaggedStats<Peak>*>(&stats_and_pool_),
- bottom_arena_);
+ return MemStats("ArenaStack peak", PeakStats(), bottom_arena_);
}
uint8_t* ArenaStack::AllocateFromNextArena(size_t rounded_bytes) {
@@ -107,18 +106,32 @@ void* ArenaStack::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
return ptr;
}
+ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other)
+ : DebugStackReference(std::move(other)),
+ DebugStackRefCounter(),
+ ArenaAllocatorStats(other),
+ arena_stack_(other.arena_stack_),
+ mark_arena_(other.mark_arena_),
+ mark_ptr_(other.mark_ptr_),
+ mark_end_(other.mark_end_) {
+ other.DebugStackRefCounter::CheckNoRefs();
+ other.arena_stack_ = nullptr;
+}
+
ScopedArenaAllocator::ScopedArenaAllocator(ArenaStack* arena_stack)
- : DebugStackReference(arena_stack),
- DebugStackRefCounter(),
- ArenaAllocatorStats(*arena_stack->CurrentStats()),
- arena_stack_(arena_stack),
- mark_arena_(arena_stack->top_arena_),
- mark_ptr_(arena_stack->top_ptr_),
- mark_end_(arena_stack->top_end_) {
+ : DebugStackReference(arena_stack),
+ DebugStackRefCounter(),
+ ArenaAllocatorStats(*arena_stack->CurrentStats()),
+ arena_stack_(arena_stack),
+ mark_arena_(arena_stack->top_arena_),
+ mark_ptr_(arena_stack->top_ptr_),
+ mark_end_(arena_stack->top_end_) {
}
ScopedArenaAllocator::~ScopedArenaAllocator() {
- DoReset();
+ if (arena_stack_ != nullptr) {
+ DoReset();
+ }
}
void ScopedArenaAllocator::Reset() {
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index f156f526fc..8f50fd443b 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -54,6 +54,7 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
void Reset();
size_t PeakBytesAllocated() {
+ DebugStackRefCounter::CheckNoRefs();
return PeakStats()->BytesAllocated();
}
@@ -81,6 +82,10 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
return static_cast<TaggedStats<Peak>*>(&stats_and_pool_);
}
+ const ArenaAllocatorStats* PeakStats() const {
+ return static_cast<const TaggedStats<Peak>*>(&stats_and_pool_);
+ }
+
ArenaAllocatorStats* CurrentStats() {
return static_cast<TaggedStats<Current>*>(&stats_and_pool_);
}
@@ -132,16 +137,7 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
class ScopedArenaAllocator
: private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
public:
- // Create a ScopedArenaAllocator directly on the ArenaStack when the scope of
- // the allocator is not exactly a C++ block scope. For example, an optimization
- // pass can create the scoped allocator in Start() and destroy it in End().
- static ScopedArenaAllocator* Create(ArenaStack* arena_stack) {
- void* addr = arena_stack->Alloc(sizeof(ScopedArenaAllocator), kArenaAllocMisc);
- ScopedArenaAllocator* allocator = new(addr) ScopedArenaAllocator(arena_stack);
- allocator->mark_ptr_ = reinterpret_cast<uint8_t*>(addr);
- return allocator;
- }
-
+ ScopedArenaAllocator(ScopedArenaAllocator&& other);
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
@@ -173,7 +169,7 @@ class ScopedArenaAllocator
static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
- ArenaStack* const arena_stack_;
+ ArenaStack* arena_stack_;
Arena* mark_arena_;
uint8_t* mark_ptr_;
uint8_t* mark_end_;
diff --git a/runtime/cdex/compact_dex_file.cc b/runtime/cdex/compact_dex_file.cc
index dbe2c66ecc..82ffdb0adb 100644
--- a/runtime/cdex/compact_dex_file.cc
+++ b/runtime/cdex/compact_dex_file.cc
@@ -21,6 +21,14 @@ namespace art {
constexpr uint8_t CompactDexFile::kDexMagic[kDexMagicSize];
constexpr uint8_t CompactDexFile::kDexMagicVersion[];
+void CompactDexFile::WriteMagic(uint8_t* magic) {
+ std::copy_n(kDexMagic, kDexMagicSize, magic);
+}
+
+void CompactDexFile::WriteCurrentVersion(uint8_t* magic) {
+ std::copy_n(kDexMagicVersion, kDexVersionLen, magic + kDexMagicSize);
+}
+
bool CompactDexFile::IsMagicValid(const uint8_t* magic) {
return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
}
diff --git a/runtime/cdex/compact_dex_file.h b/runtime/cdex/compact_dex_file.h
index fa6eab2d76..8ab9247125 100644
--- a/runtime/cdex/compact_dex_file.h
+++ b/runtime/cdex/compact_dex_file.h
@@ -24,9 +24,18 @@ namespace art {
// CompactDex is a currently ART internal dex file format that aims to reduce storage/RAM usage.
class CompactDexFile : public DexFile {
public:
+ class Header : public DexFile::Header {
+ // Same for now.
+ };
static constexpr uint8_t kDexMagic[kDexMagicSize] = { 'c', 'd', 'e', 'x' };
static constexpr uint8_t kDexMagicVersion[] = {'0', '0', '1', '\0'};
+ // Write the compact dex specific magic.
+ static void WriteMagic(uint8_t* magic);
+
+ // Write the current version, note that the input is the address of the magic.
+ static void WriteCurrentVersion(uint8_t* magic);
+
// Returns true if the byte string points to the magic value.
static bool IsMagicValid(const uint8_t* magic);
virtual bool IsMagicValid() const OVERRIDE;
@@ -35,16 +44,22 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ bool IsCompactDexFile() const OVERRIDE {
+ return true;
+ }
+
private:
// Not supported yet.
CompactDexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file)
- : DexFile(base, size, location, location_checksum, oat_dex_file) {}
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
friend class DexFile;
+ friend class DexFileLoader;
DISALLOW_COPY_AND_ASSIGN(CompactDexFile);
};
diff --git a/runtime/cdex/compact_dex_file_test.cc b/runtime/cdex/compact_dex_file_test.cc
index 6fe4bccd16..b43b35d69a 100644
--- a/runtime/cdex/compact_dex_file_test.cc
+++ b/runtime/cdex/compact_dex_file_test.cc
@@ -32,12 +32,10 @@ TEST_F(CompactDexFileTest, MagicAndVersion) {
const bool valid_magic = (i & 1) == 0;
const bool valid_version = (j & 1) == 0;
if (valid_magic) {
- std::copy_n(CompactDexFile::kDexMagic, CompactDexFile::kDexMagicSize, header);
+ CompactDexFile::WriteMagic(header);
}
if (valid_version) {
- std::copy_n(CompactDexFile::kDexMagicVersion,
- CompactDexFile::kDexVersionLen,
- header + CompactDexFile::kDexMagicSize);
+ CompactDexFile::WriteCurrentVersion(header);
}
EXPECT_EQ(valid_magic, CompactDexFile::IsMagicValid(header));
EXPECT_EQ(valid_version, CompactDexFile::IsVersionValid(header));
diff --git a/tools/cpp-define-generator/offset_codeitem.def b/runtime/cdex/compact_dex_level.h
index e5acd1d93d..b824462bf0 100644
--- a/tools/cpp-define-generator/offset_codeitem.def
+++ b/runtime/cdex/compact_dex_level.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,20 +14,21 @@
* limitations under the License.
*/
-// Offsets within CodeItem.
+#ifndef ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
+#define ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include <cstddef> // offsetof
-#include "dex_file.h" // art::DexFile
-#endif
+#include "dex_file.h"
-#include "common.def" // DEFINE_OFFSET_EXPR
+namespace art {
-#define DEFINE_CODEITEM_OFFSET(field_name) \
- DEFINE_OFFSET_EXPR(CodeItem, field_name, int32_t, offsetof(art::DexFile::CodeItem, field_name ## _))
+// Optimization level for compact dex generation.
+enum class CompactDexLevel {
+ // Level none means not generated.
+ kCompactDexLevelNone,
+ // Level fast means optimizations that don't take many resources to perform.
+ kCompactDexLevelFast,
+};
-// Field Name
-DEFINE_CODEITEM_OFFSET(insns)
+} // namespace art
-#undef DEFINE_CODEITEM_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
+#endif // ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ba5fe046a0..bd5e18493e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -98,8 +98,9 @@
#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
+#include "mirror/var_handle.h"
#include "native/dalvik_system_DexFile.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "oat.h"
#include "oat_file-inl.h"
#include "oat_file.h"
@@ -711,6 +712,12 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
SetClassRoot(kJavaLangReflectMethodArrayClass, class_root);
mirror::Method::SetArrayClass(class_root);
+ // Create java.lang.invoke.CallSite.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/CallSite;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangInvokeCallSite, class_root);
+ mirror::CallSite::SetClass(class_root);
+
// Create java.lang.invoke.MethodType.class root
class_root = FindSystemClass(self, "Ljava/lang/invoke/MethodType;");
CHECK(class_root != nullptr);
@@ -729,11 +736,35 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
SetClassRoot(kJavaLangInvokeMethodHandlesLookup, class_root);
mirror::MethodHandlesLookup::SetClass(class_root);
- // Create java.lang.invoke.CallSite.class root
- class_root = FindSystemClass(self, "Ljava/lang/invoke/CallSite;");
+ // Create java.lang.invoke.VarHandle.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/VarHandle;");
CHECK(class_root != nullptr);
- SetClassRoot(kJavaLangInvokeCallSite, class_root);
- mirror::CallSite::SetClass(class_root);
+ SetClassRoot(kJavaLangInvokeVarHandle, class_root);
+ mirror::VarHandle::SetClass(class_root);
+
+ // Create java.lang.invoke.FieldVarHandle.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/FieldVarHandle;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangInvokeFieldVarHandle, class_root);
+ mirror::FieldVarHandle::SetClass(class_root);
+
+ // Create java.lang.invoke.ArrayElementVarHandle.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/ArrayElementVarHandle;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangInvokeArrayElementVarHandle, class_root);
+ mirror::ArrayElementVarHandle::SetClass(class_root);
+
+ // Create java.lang.invoke.ByteArrayViewVarHandle.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/ByteArrayViewVarHandle;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangInvokeByteArrayViewVarHandle, class_root);
+ mirror::ByteArrayViewVarHandle::SetClass(class_root);
+
+ // Create java.lang.invoke.ByteBufferViewVarHandle.class root
+ class_root = FindSystemClass(self, "Ljava/lang/invoke/ByteBufferViewVarHandle;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangInvokeByteBufferViewVarHandle, class_root);
+ mirror::ByteBufferViewVarHandle::SetClass(class_root);
class_root = FindSystemClass(self, "Ldalvik/system/EmulatedStackFrame;");
CHECK(class_root != nullptr);
@@ -1001,10 +1032,15 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) {
mirror::Constructor::SetArrayClass(GetClassRoot(kJavaLangReflectConstructorArrayClass));
mirror::Method::SetClass(GetClassRoot(kJavaLangReflectMethod));
mirror::Method::SetArrayClass(GetClassRoot(kJavaLangReflectMethodArrayClass));
- mirror::MethodType::SetClass(GetClassRoot(kJavaLangInvokeMethodType));
+ mirror::CallSite::SetClass(GetClassRoot(kJavaLangInvokeCallSite));
mirror::MethodHandleImpl::SetClass(GetClassRoot(kJavaLangInvokeMethodHandleImpl));
mirror::MethodHandlesLookup::SetClass(GetClassRoot(kJavaLangInvokeMethodHandlesLookup));
- mirror::CallSite::SetClass(GetClassRoot(kJavaLangInvokeCallSite));
+ mirror::MethodType::SetClass(GetClassRoot(kJavaLangInvokeMethodType));
+ mirror::VarHandle::SetClass(GetClassRoot(kJavaLangInvokeVarHandle));
+ mirror::FieldVarHandle::SetClass(GetClassRoot(kJavaLangInvokeFieldVarHandle));
+ mirror::ArrayElementVarHandle::SetClass(GetClassRoot(kJavaLangInvokeArrayElementVarHandle));
+ mirror::ByteArrayViewVarHandle::SetClass(GetClassRoot(kJavaLangInvokeByteArrayViewVarHandle));
+ mirror::ByteBufferViewVarHandle::SetClass(GetClassRoot(kJavaLangInvokeByteBufferViewVarHandle));
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
@@ -1853,6 +1889,7 @@ bool ClassLinker::AddImageSpace(
if (kIsDebugBuild && app_image) {
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
+ ScopedTrace trace("VerifyAppImage");
VerifyAppImage(header, class_loader, dex_caches, class_table, space);
}
@@ -2116,10 +2153,15 @@ ClassLinker::~ClassLinker() {
mirror::IntArray::ResetArrayClass();
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
+ mirror::CallSite::ResetClass();
mirror::MethodType::ResetClass();
mirror::MethodHandleImpl::ResetClass();
mirror::MethodHandlesLookup::ResetClass();
- mirror::CallSite::ResetClass();
+ mirror::VarHandle::ResetClass();
+ mirror::FieldVarHandle::ResetClass();
+ mirror::ArrayElementVarHandle::ResetClass();
+ mirror::ByteArrayViewVarHandle::ResetClass();
+ mirror::ByteBufferViewVarHandle::ResetClass();
mirror::EmulatedStackFrame::ResetClass();
Thread* const self = Thread::Current();
for (const ClassLoaderData& data : class_loaders_) {
@@ -7758,12 +7800,6 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == nullptr) {
- // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here
- // as a workaround for FastNative JNI to avoid AssertNoPendingException() when
- // trying to resolve annotations while an exception may be pending. Bug: 34659969
- resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get());
- }
- if (resolved == nullptr) {
Thread* self = Thread::Current();
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(self, descriptor, class_loader);
@@ -8174,23 +8210,23 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
Handle<mirror::Class> return_type;
switch (handle_type) {
case DexFile::MethodHandleType::kStaticPut: {
- method_params->Set(0, target_field->GetType<true>());
+ method_params->Set(0, target_field->ResolveType());
return_type = hs.NewHandle(FindPrimitiveClass('V'));
break;
}
case DexFile::MethodHandleType::kStaticGet: {
- return_type = hs.NewHandle(target_field->GetType<true>());
+ return_type = hs.NewHandle(target_field->ResolveType());
break;
}
case DexFile::MethodHandleType::kInstancePut: {
method_params->Set(0, target_field->GetDeclaringClass());
- method_params->Set(1, target_field->GetType<true>());
+ method_params->Set(1, target_field->ResolveType());
return_type = hs.NewHandle(FindPrimitiveClass('V'));
break;
}
case DexFile::MethodHandleType::kInstanceGet: {
method_params->Set(0, target_field->GetDeclaringClass());
- return_type = hs.NewHandle(target_field->GetType<true>());
+ return_type = hs.NewHandle(target_field->ResolveType());
break;
}
case DexFile::MethodHandleType::kInvokeStatic:
@@ -8540,6 +8576,11 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
"Ljava/lang/invoke/MethodHandleImpl;",
"Ljava/lang/invoke/MethodHandles$Lookup;",
"Ljava/lang/invoke/MethodType;",
+ "Ljava/lang/invoke/VarHandle;",
+ "Ljava/lang/invoke/FieldVarHandle;",
+ "Ljava/lang/invoke/ArrayElementVarHandle;",
+ "Ljava/lang/invoke/ByteArrayViewVarHandle;",
+ "Ljava/lang/invoke/ByteBufferViewVarHandle;",
"Ljava/lang/ClassLoader;",
"Ljava/lang/Throwable;",
"Ljava/lang/ClassNotFoundException;",
@@ -8592,7 +8633,7 @@ jobject ClassLinker::CreateWellKnownClassLoader(Thread* self,
ArtField* dex_elements_field =
jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
- Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->GetType<true>()));
+ Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->ResolveType()));
DCHECK(dex_elements_class != nullptr);
DCHECK(dex_elements_class->IsArrayClass());
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements(hs.NewHandle(
@@ -8607,10 +8648,10 @@ jobject ClassLinker::CreateWellKnownClassLoader(Thread* self,
DCHECK_EQ(h_dex_element_class.Get(), element_file_field->GetDeclaringClass());
ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
- DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->GetType<false>());
+ DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->LookupType());
ArtField* file_name_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
- DCHECK_EQ(file_name_field->GetDeclaringClass(), element_file_field->GetType<false>());
+ DCHECK_EQ(file_name_field->GetDeclaringClass(), element_file_field->LookupType());
// Fill the elements array.
int32_t index = 0;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index eba202228c..2d9ec5a440 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -120,6 +120,11 @@ class ClassLinker {
kJavaLangInvokeMethodHandleImpl,
kJavaLangInvokeMethodHandlesLookup,
kJavaLangInvokeMethodType,
+ kJavaLangInvokeVarHandle,
+ kJavaLangInvokeFieldVarHandle,
+ kJavaLangInvokeArrayElementVarHandle,
+ kJavaLangInvokeByteArrayViewVarHandle,
+ kJavaLangInvokeByteBufferViewVarHandle,
kJavaLangClassLoader,
kJavaLangThrowable,
kJavaLangClassNotFoundException,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index bd736929d8..1b867c0018 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -48,6 +48,7 @@
#include "mirror/reference.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
+#include "mirror/var_handle.h"
#include "scoped_thread_state_change-inl.h"
#include "standard_dex_file.h"
#include "thread-current-inl.h"
@@ -252,7 +253,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_TRUE(field != nullptr);
EXPECT_OBJ_PTR_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
- EXPECT_TRUE(field->GetType<true>() != nullptr);
+ EXPECT_TRUE(field->ResolveType() != nullptr);
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
@@ -361,7 +362,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
MemberOffset current_ref_offset = start_ref_offset;
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
ArtField* field = klass->GetInstanceField(i);
- ObjPtr<mirror::Class> field_type = field->GetType<true>();
+ ObjPtr<mirror::Class> field_type = field->ResolveType();
ASSERT_TRUE(field_type != nullptr);
if (!field->IsPrimitiveType()) {
ASSERT_TRUE(!field_type->IsPrimitive());
@@ -777,6 +778,39 @@ struct CallSiteOffsets : public CheckOffsets<mirror::CallSite> {
}
};
+struct VarHandleOffsets : public CheckOffsets<mirror::VarHandle> {
+ VarHandleOffsets() : CheckOffsets<mirror::VarHandle>(
+ false, "Ljava/lang/invoke/VarHandle;") {
+ addOffset(OFFSETOF_MEMBER(mirror::VarHandle, access_modes_bit_mask_), "accessModesBitMask");
+ addOffset(OFFSETOF_MEMBER(mirror::VarHandle, coordinate_type0_), "coordinateType0");
+ addOffset(OFFSETOF_MEMBER(mirror::VarHandle, coordinate_type1_), "coordinateType1");
+ addOffset(OFFSETOF_MEMBER(mirror::VarHandle, var_type_), "varType");
+ }
+};
+
+struct FieldVarHandleOffsets : public CheckOffsets<mirror::FieldVarHandle> {
+ FieldVarHandleOffsets() : CheckOffsets<mirror::FieldVarHandle>(
+ false, "Ljava/lang/invoke/FieldVarHandle;") {
+ addOffset(OFFSETOF_MEMBER(mirror::FieldVarHandle, art_field_), "artField");
+ }
+};
+
+struct ByteArrayViewVarHandleOffsets : public CheckOffsets<mirror::ByteArrayViewVarHandle> {
+ ByteArrayViewVarHandleOffsets() : CheckOffsets<mirror::ByteArrayViewVarHandle>(
+ false, "Ljava/lang/invoke/ByteArrayViewVarHandle;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ByteArrayViewVarHandle, native_byte_order_),
+ "nativeByteOrder");
+ }
+};
+
+struct ByteBufferViewVarHandleOffsets : public CheckOffsets<mirror::ByteBufferViewVarHandle> {
+ ByteBufferViewVarHandleOffsets() : CheckOffsets<mirror::ByteBufferViewVarHandle>(
+ false, "Ljava/lang/invoke/ByteBufferViewVarHandle;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ByteBufferViewVarHandle, native_byte_order_),
+ "nativeByteOrder");
+ }
+};
+
// C++ fields must exactly match the fields in the Java classes. If this fails,
// reorder the fields in the C++ class. Managed class fields are ordered by
// ClassLinker::LinkFields.
@@ -802,6 +836,10 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
EXPECT_TRUE(MethodHandlesLookupOffsets().Check());
EXPECT_TRUE(EmulatedStackFrameOffsets().Check());
EXPECT_TRUE(CallSiteOffsets().Check());
+ EXPECT_TRUE(VarHandleOffsets().Check());
+ EXPECT_TRUE(FieldVarHandleOffsets().Check());
+ EXPECT_TRUE(ByteArrayViewVarHandleOffsets().Check());
+ EXPECT_TRUE(ByteBufferViewVarHandleOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
@@ -1466,6 +1504,7 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
old_dex_file->Size(),
location->ToModifiedUtf8(),
0u,
+ nullptr,
nullptr));
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 167533d68a..38f59efdf7 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -230,6 +230,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// contents. So pass true to verify_checksum.
if (!DexFileLoader::Open(location.c_str(),
location.c_str(),
+ Runtime::Current()->IsVerificationEnabled(),
/*verify_checksum*/ true,
&error_msg,
&info.opened_dex_files)) {
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index fcc5393490..267735fe95 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -17,10 +17,20 @@
#ifndef ART_RUNTIME_COMMON_DEX_OPERATIONS_H_
#define ART_RUNTIME_COMMON_DEX_OPERATIONS_H_
+#include "android-base/logging.h"
#include "art_field.h"
#include "art_method.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "class_linker.h"
+#include "handle_scope-inl.h"
+#include "instrumentation.h"
+#include "interpreter/shadow_frame.h"
#include "interpreter/unstarted_runtime.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "obj_ptr-inl.h"
+#include "primitive.h"
#include "runtime.h"
#include "stack.h"
#include "thread.h"
@@ -61,6 +71,18 @@ inline void PerformCall(Thread* self,
}
}
+template <typename T>
+inline void DCheckStaticState(Thread* self, T* entity) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ ObjPtr<mirror::Class> klass = entity->GetDeclaringClass();
+ if (entity->IsStatic()) {
+ klass->AssertInitializedOrInitializingInThread(self);
+ } else {
+ CHECK(klass->IsInitializing() || klass->IsErroneousResolved());
+ }
+ }
+}
+
template<Primitive::Type field_type>
static ALWAYS_INLINE bool DoFieldGetCommon(Thread* self,
const ShadowFrame& shadow_frame,
@@ -68,7 +90,7 @@ static ALWAYS_INLINE bool DoFieldGetCommon(Thread* self,
ArtField* field,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ DCheckStaticState(self, field);
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -126,7 +148,7 @@ ALWAYS_INLINE bool DoFieldPutCommon(Thread* self,
ArtField* field,
JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
- field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ DCheckStaticState(self, field);
// Report this field access to instrumentation if needed. Since we only have the offset of
// the field from the base of the object, we need to look for it first.
@@ -179,7 +201,7 @@ ALWAYS_INLINE bool DoFieldPutCommon(Thread* self,
StackHandleScope<2> hs(self);
HandleWrapperObjPtr<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
- field_class = field->GetType<true>();
+ field_class = field->ResolveType();
}
if (!reg->VerifierInstanceOf(field_class.Ptr())) {
// This should never happen.
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 0c2e49010e..493468bb3d 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -21,12 +21,13 @@
#include <fcntl.h>
#include <stdlib.h>
#include <cstdio>
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
-#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
#include "android-base/stringprintf.h"
+#include <unicode/uvernum.h>
#include "art_field-inl.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
@@ -333,26 +334,26 @@ std::string CommonRuntimeTestImpl::GetAndroidHostToolsDir() {
std::string CommonRuntimeTestImpl::GetAndroidTargetToolsDir(InstructionSet isa) {
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return GetAndroidToolsDir("prebuilts/gcc/linux-x86/arm",
"arm-linux-androideabi",
"arm-linux-androideabi");
- case kArm64:
+ case InstructionSet::kArm64:
return GetAndroidToolsDir("prebuilts/gcc/linux-x86/aarch64",
"aarch64-linux-android",
"aarch64-linux-android");
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
return GetAndroidToolsDir("prebuilts/gcc/linux-x86/x86",
"x86_64-linux-android",
"x86_64-linux-android");
- case kMips:
- case kMips64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
return GetAndroidToolsDir("prebuilts/gcc/linux-x86/mips",
"mips64el-linux-android",
"mips64el-linux-android");
- case kNone:
+ case InstructionSet::kNone:
break;
}
ADD_FAILURE() << "Invalid isa " << isa;
@@ -373,7 +374,8 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile(
std::string error_msg;
MemMap::Init();
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
@@ -572,8 +574,11 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFi
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFileLoader::Open(
- filename.c_str(), filename.c_str(), kVerifyChecksum, &error_msg, &dex_files);
+ bool success = DexFileLoader::Open(filename.c_str(),
+ filename.c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index e2131f1530..5be8d5b55c 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -253,13 +253,13 @@ class CheckJniAbortCatcher {
}
#define TEST_DISABLED_FOR_MIPS() \
- if (kRuntimeISA == kMips) { \
+ if (kRuntimeISA == InstructionSet::kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
return; \
}
#define TEST_DISABLED_FOR_X86() \
- if (kRuntimeISA == kX86) { \
+ if (kRuntimeISA == InstructionSet::kX86) { \
printf("WARNING: TEST DISABLED FOR X86\n"); \
return; \
}
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index bf2e7062ad..cd52bb6551 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -31,7 +31,7 @@
#include "mirror/method_type.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "thread.h"
#include "verifier/method_verifier.h"
@@ -433,7 +433,8 @@ void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) {
DCHECK(kEmitCompilerReadBarrier);
uint32_t monitor_offset = mirror::Object::MonitorOffset().Uint32Value();
- if (kUseBakerReadBarrier && (kRuntimeISA == kX86 || kRuntimeISA == kX86_64)) {
+ if (kUseBakerReadBarrier &&
+ (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64)) {
constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
monitor_offset += gray_byte_position;
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b021ff1734..c7f245309f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -55,8 +55,8 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "safe_map.h"
@@ -346,6 +346,10 @@ bool DebuggerActiveMethodInspectionCallback::IsMethodBeingInspected(ArtMethod* m
return Dbg::IsDebuggerActive();
}
+bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
+ return !Dbg::MethodHasAnyBreakpoints(m);
+}
+
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
@@ -1927,7 +1931,7 @@ static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t
StackHandleScope<2> hs(Thread::Current());
HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- field_type = f->GetType<true>();
+ field_type = f->ResolveType();
}
if (!field_type->IsAssignableFrom(v->GetClass())) {
return JDWP::ERR_INVALID_OBJECT;
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 18126b1eed..ec37833f6d 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -55,6 +55,7 @@ class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
bool IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* m) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index a9bb95480e..5f9b3cf6c4 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -23,6 +23,7 @@
#include <gtest/gtest.h>
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
@@ -84,6 +85,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(),
GetMultiDexSrc1().c_str(),
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&multi1)) << error_msg;
@@ -92,6 +94,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(),
GetMultiDexSrc2().c_str(),
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&multi2)) << error_msg;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index f2c43f7f87..974c7acbb2 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -16,13 +16,10 @@
#include "dex_file.h"
-#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/file.h>
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <zlib.h>
#include <memory>
@@ -35,11 +32,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file-inl.h"
-#include "dex_file_loader.h"
-#include "jvalue.h"
#include "leb128.h"
-#include "mem_map.h"
-#include "os.h"
#include "standard_dex_file.h"
#include "utf-inl.h"
#include "utils.h"
@@ -59,46 +52,32 @@ uint32_t DexFile::CalculateChecksum() const {
return adler32(adler32(0L, Z_NULL, 0), non_sum_ptr, Size() - non_sum);
}
-struct DexFile::AnnotationValue {
- JValue value_;
- uint8_t type_;
-};
-
int DexFile::GetPermissions() const {
- if (mem_map_.get() == nullptr) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
+ CHECK(container_.get() != nullptr);
+ return container_->GetPermissions();
}
bool DexFile::IsReadOnly() const {
- return GetPermissions() == PROT_READ;
+ CHECK(container_.get() != nullptr);
+ return container_->IsReadOnly();
}
bool DexFile::EnableWrite() const {
- CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
+ CHECK(container_.get() != nullptr);
+ return container_->EnableWrite();
}
bool DexFile::DisableWrite() const {
- CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ);
- }
+ CHECK(container_.get() != nullptr);
+ return container_->DisableWrite();
}
DexFile::DexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file)
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
: begin_(base),
size_(size),
location_(location),
@@ -114,7 +93,8 @@ DexFile::DexFile(const uint8_t* base,
num_method_handles_(0),
call_site_ids_(nullptr),
num_call_site_ids_(0),
- oat_dex_file_(oat_dex_file) {
+ oat_dex_file_(oat_dex_file),
+ container_(container) {
CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
// Check base (=header) alignment.
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 6868d525e2..f239edc064 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -39,6 +39,21 @@ class Signature;
class StringPiece;
class ZipArchive;
+// Some instances of DexFile own the storage referred to by DexFile. Clients that
+// own such storage manage its lifetime by subclassing DexFileContainer.
+class DexFileContainer {
+ public:
+ DexFileContainer() { }
+ virtual ~DexFileContainer() { }
+ virtual int GetPermissions() = 0;
+ virtual bool IsReadOnly() = 0;
+ virtual bool EnableWrite() = 0;
+ virtual bool DisableWrite() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DexFileContainer);
+};
+
// Dex file is the API that exposes native dex files (ordinary dex files) and CompactDex.
// Originally, the dex file format used by ART was mostly the same as APKs. The only change was
// quickened opcodes and layout optimizations.
@@ -63,35 +78,32 @@ class DexFile {
// Raw header_item.
struct Header {
- uint8_t magic_[8];
- uint32_t checksum_; // See also location_checksum_
- uint8_t signature_[kSha1DigestSize];
- uint32_t file_size_; // size of entire file
- uint32_t header_size_; // offset to start of next section
- uint32_t endian_tag_;
- uint32_t link_size_; // unused
- uint32_t link_off_; // unused
- uint32_t map_off_; // unused
- uint32_t string_ids_size_; // number of StringIds
- uint32_t string_ids_off_; // file offset of StringIds array
- uint32_t type_ids_size_; // number of TypeIds, we don't support more than 65535
- uint32_t type_ids_off_; // file offset of TypeIds array
- uint32_t proto_ids_size_; // number of ProtoIds, we don't support more than 65535
- uint32_t proto_ids_off_; // file offset of ProtoIds array
- uint32_t field_ids_size_; // number of FieldIds
- uint32_t field_ids_off_; // file offset of FieldIds array
- uint32_t method_ids_size_; // number of MethodIds
- uint32_t method_ids_off_; // file offset of MethodIds array
- uint32_t class_defs_size_; // number of ClassDefs
- uint32_t class_defs_off_; // file offset of ClassDef array
- uint32_t data_size_; // size of data section
- uint32_t data_off_; // file offset of data section
+ uint8_t magic_[8] = {};
+ uint32_t checksum_ = 0; // See also location_checksum_
+ uint8_t signature_[kSha1DigestSize] = {};
+ uint32_t file_size_ = 0; // size of entire file
+ uint32_t header_size_ = 0; // offset to start of next section
+ uint32_t endian_tag_ = 0;
+ uint32_t link_size_ = 0; // unused
+ uint32_t link_off_ = 0; // unused
+ uint32_t map_off_ = 0; // unused
+ uint32_t string_ids_size_ = 0; // number of StringIds
+ uint32_t string_ids_off_ = 0; // file offset of StringIds array
+ uint32_t type_ids_size_ = 0; // number of TypeIds, we don't support more than 65535
+ uint32_t type_ids_off_ = 0; // file offset of TypeIds array
+ uint32_t proto_ids_size_ = 0; // number of ProtoIds, we don't support more than 65535
+ uint32_t proto_ids_off_ = 0; // file offset of ProtoIds array
+ uint32_t field_ids_size_ = 0; // number of FieldIds
+ uint32_t field_ids_off_ = 0; // file offset of FieldIds array
+ uint32_t method_ids_size_ = 0; // number of MethodIds
+ uint32_t method_ids_off_ = 0; // file offset of MethodIds array
+ uint32_t class_defs_size_ = 0; // number of ClassDefs
+ uint32_t class_defs_off_ = 0; // file offset of ClassDef array
+ uint32_t data_size_ = 0; // size of data section
+ uint32_t data_off_ = 0; // file offset of data section
// Decode the dex magic version
uint32_t GetVersion() const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Header);
};
// Map item type codes.
@@ -290,8 +302,8 @@ class DexFile {
// Raw code_item.
struct CodeItem {
IterationRange<DexInstructionIterator> Instructions() const {
- return { DexInstructionIterator(insns_),
- DexInstructionIterator(insns_ + insns_size_in_code_units_)};
+ return { DexInstructionIterator(insns_, 0u),
+ DexInstructionIterator(insns_, insns_size_in_code_units_) };
}
const Instruction& InstructionAt(uint32_t dex_pc) const {
@@ -853,14 +865,18 @@ class DexFile {
: reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
}
- const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
- DCHECK_LE(index, set_item->size_);
- uint32_t offset = set_item->entries_[index];
+ ALWAYS_INLINE const AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
+ DCHECK_LE(offset, Size());
return (offset == 0)
? nullptr
: reinterpret_cast<const AnnotationItem*>(begin_ + offset);
}
+ const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
+ DCHECK_LE(index, set_item->size_);
+ return GetAnnotationItemAtOffset(set_item->entries_[index]);
+ }
+
const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
uint32_t offset = anno_item->annotations_off_;
return (offset == 0)
@@ -976,12 +992,21 @@ class DexFile {
// Returns a human-readable form of the type at an index.
std::string PrettyType(dex::TypeIndex type_idx) const;
+ // Helper functions.
+ virtual bool IsCompactDexFile() const {
+ return false;
+ }
+ virtual bool IsStandardDexFile() const {
+ return false;
+ }
+
protected:
DexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file);
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container);
// Top-level initializer that calls other Init methods.
bool Init(std::string* error_msg);
@@ -1006,9 +1031,6 @@ class DexFile {
const uint32_t location_checksum_;
- // Manages the underlying memory allocation.
- std::unique_ptr<MemMap> mem_map_;
-
// Points to the header section.
const Header* const header_;
@@ -1047,6 +1069,9 @@ class DexFile {
// null.
mutable const OatDexFile* oat_dex_file_;
+ // Manages the underlying memory allocation.
+ std::unique_ptr<DexFileContainer> container_;
+
friend class DexFileLoader;
friend class DexFileVerifierTest;
friend class OatWriter;
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index fe33bded2b..845202ff72 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -134,8 +134,13 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = field->GetDexFile();
ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ if (class_def == nullptr) {
+ DCHECK(klass->IsProxyClass());
+ return nullptr;
+ }
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file->GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -258,6 +263,9 @@ const uint8_t* SearchEncodedAnnotation(const DexFile& dex_file,
const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ }
const DexFile* dex_file = method->GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
dex_file->GetAnnotationsDirectory(method->GetClassDef());
@@ -305,8 +313,13 @@ const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod*
const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
+ const DexFile::ClassDef* class_def = klass.GetClassDef();
+ if (class_def == nullptr) {
+ DCHECK(klass.GetRealClass()->IsProxyClass());
+ return nullptr;
+ }
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file.GetAnnotationsDirectory(*klass.GetClassDef());
+ dex_file.GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
diff --git a/runtime/dex_file_layout.cc b/runtime/dex_file_layout.cc
index 4375d7f799..c3fae15b14 100644
--- a/runtime/dex_file_layout.cc
+++ b/runtime/dex_file_layout.cc
@@ -18,6 +18,7 @@
#include <sys/mman.h>
+#include "base/file_utils.h"
#include "dex_file.h"
#include "utils.h"
diff --git a/runtime/dex_file_loader.cc b/runtime/dex_file_loader.cc
index e300e0e58f..bc9276985b 100644
--- a/runtime/dex_file_loader.cc
+++ b/runtime/dex_file_loader.cc
@@ -33,6 +33,50 @@
namespace art {
+namespace {
+
+class MemMapContainer : public DexFileContainer {
+ public:
+ explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
+ virtual ~MemMapContainer() OVERRIDE { }
+
+ int GetPermissions() OVERRIDE {
+ if (mem_map_.get() == nullptr) {
+ return 0;
+ } else {
+ return mem_map_->GetProtect();
+ }
+ }
+
+ bool IsReadOnly() OVERRIDE {
+ return GetPermissions() == PROT_READ;
+ }
+
+ bool EnableWrite() OVERRIDE {
+ CHECK(IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+
+ bool DisableWrite() OVERRIDE {
+ CHECK(!IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ);
+ }
+ }
+
+ private:
+ std::unique_ptr<MemMap> mem_map_;
+ DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
+};
+
+} // namespace
+
using android::base::StringPrintf;
static constexpr OatDexFile* kNoOatDexFile = nullptr;
@@ -59,11 +103,19 @@ bool DexFileLoader::IsVersionAndMagicValid(const uint8_t* magic) {
bool DexFileLoader::GetMultiDexChecksums(const char* filename,
std::vector<uint32_t>* checksums,
- std::string* error_msg) {
+ std::string* error_msg,
+ int zip_fd) {
CHECK(checksums != nullptr);
uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ File fd;
+ if (zip_fd != -1) {
+ if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
+ fd = File(zip_fd, false /* check_usage */);
+ }
+ } else {
+ fd = OpenAndReadMagic(filename, &magic, error_msg);
+ }
if (fd.Fd() == -1) {
DCHECK(!error_msg->empty());
return false;
@@ -151,7 +203,9 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
oat_dex_file,
verify,
verify_checksum,
- error_msg);
+ error_msg,
+ /*container*/ nullptr,
+ /*verify_result*/ nullptr);
}
std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
@@ -177,15 +231,15 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
kNoOatDexFile,
verify,
verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
return dex_file;
}
bool DexFileLoader::Open(const char* filename,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
@@ -198,12 +252,12 @@ bool DexFileLoader::Open(const char* filename,
return false;
}
if (IsZipMagic(magic)) {
- return OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
+ return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
}
if (IsMagicValid(magic)) {
std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
location,
- /* verify */ true,
+ verify,
verify_checksum,
error_msg));
if (dex_file.get() != nullptr) {
@@ -219,14 +273,16 @@ bool DexFileLoader::Open(const char* filename,
std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg);
+ return OpenFile(fd, location, verify, verify_checksum, error_msg);
}
bool DexFileLoader::OpenZip(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
@@ -237,7 +293,8 @@ bool DexFileLoader::OpenZip(int fd,
DCHECK(!error_msg->empty());
return false;
}
- return OpenAllDexFilesFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files);
+ return OpenAllDexFilesFromZip(
+ *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
}
std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
@@ -292,10 +349,9 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
kNoOatDexFile,
verify,
verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
return dex_file;
}
@@ -304,6 +360,7 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
const ZipArchive& zip_archive,
const char* entry_name,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
ZipOpenErrorCode* error_code) {
@@ -357,9 +414,10 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
location,
zip_entry->GetCrc32(),
kNoOatDexFile,
- /* verify */ true,
+ verify,
verify_checksum,
error_msg,
+ new MemMapContainer(std::move(map)),
&verify_result);
if (dex_file == nullptr) {
if (verify_result == VerifyResult::kVerifyNotAttempted) {
@@ -369,7 +427,6 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
}
return nullptr;
}
- dex_file->mem_map_ = std::move(map);
if (!dex_file->DisableWrite()) {
*error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
*error_code = ZipOpenErrorCode::kMakeReadOnlyError;
@@ -391,16 +448,18 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace("Dex file open from Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
kClassesDex,
location,
+ verify,
verify_checksum,
error_msg,
&error_code));
@@ -421,6 +480,7 @@ bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
name.c_str(),
fake_location,
+ verify,
verify_checksum,
error_msg,
&error_code));
@@ -457,15 +517,18 @@ std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
bool verify,
bool verify_checksum,
std::string* error_msg,
+ DexFileContainer* container,
VerifyResult* verify_result) {
if (verify_result != nullptr) {
*verify_result = VerifyResult::kVerifyNotAttempted;
}
std::unique_ptr<DexFile> dex_file;
if (StandardDexFile::IsMagicValid(base)) {
- dex_file.reset(new StandardDexFile(base, size, location, location_checksum, oat_dex_file));
- } else {
- return nullptr;
+ dex_file.reset(
+ new StandardDexFile(base, size, location, location_checksum, oat_dex_file, container));
+ } else if (CompactDexFile::IsMagicValid(base)) {
+ dex_file.reset(
+ new CompactDexFile(base, size, location, location_checksum, oat_dex_file, container));
}
if (dex_file == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
diff --git a/runtime/dex_file_loader.h b/runtime/dex_file_loader.h
index cb17eccfc9..17631234b3 100644
--- a/runtime/dex_file_loader.h
+++ b/runtime/dex_file_loader.h
@@ -25,6 +25,7 @@
namespace art {
class DexFile;
+class DexFileContainer;
class MemMap;
class OatDexFile;
class ZipArchive;
@@ -49,10 +50,15 @@ class DexFileLoader {
// For .dex files, this is the single header checksum.
// For zip files, this is the zip entry CRC32 checksum for classes.dex and
// each additional multidex entry classes2.dex, classes3.dex, etc.
+ // If a valid zip_fd is provided, the file content is read directly from the
+ // descriptor, and `filename` is used as an alias for error logging. If
+ // zip_fd is -1, the method tries to open `filename` and reads the content
+ // from it.
// Return true if the checksums could be found, false otherwise.
static bool GetMultiDexChecksums(const char* filename,
std::vector<uint32_t>* checksums,
- std::string* error_msg);
+ std::string* error_msg,
+ int zip_fd = -1);
// Check whether a location denotes a multidex dex file. This is a very simple check: returns
// whether the string contains the separator character.
@@ -79,6 +85,7 @@ class DexFileLoader {
// Opens all .dex files found in the file, guessing the container format based on file extension.
static bool Open(const char* filename,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -86,12 +93,14 @@ class DexFileLoader {
// Open a single dex file from an fd. This function closes the fd.
static std::unique_ptr<const DexFile> OpenDex(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg);
// Opens dex files from within a .jar, .zip, or .apk file
static bool OpenZip(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -158,6 +167,7 @@ class DexFileLoader {
// Open all classesXXX.dex files from a zip archive.
static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -167,6 +177,7 @@ class DexFileLoader {
static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
const char* entry_name,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
ZipOpenErrorCode* error_code);
@@ -185,17 +196,8 @@ class DexFileLoader {
bool verify,
bool verify_checksum,
std::string* error_msg,
- VerifyResult* verify_result = nullptr);
-
-
- // Opens a .dex file at the given address, optionally backed by a MemMap
- static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- const OatDexFile* oat_dex_file,
- std::string* error_msg);
+ DexFileContainer* container,
+ VerifyResult* verify_result);
};
} // namespace art
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index b3011379c6..90bc4b8f94 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -236,7 +236,8 @@ static bool OpenDexFilesBase64(const char* base64,
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(location, location, kVerifyChecksum, error_msg, &tmp);
+ bool success = DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, error_msg, &tmp);
if (success) {
for (std::unique_ptr<const DexFile>& dex_file : tmp) {
EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
@@ -366,7 +367,8 @@ TEST_F(DexFileTest, Version40Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, Version41Rejected) {
@@ -378,7 +380,8 @@ TEST_F(DexFileTest, Version41Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, ZeroLengthDexRejected) {
@@ -390,7 +393,8 @@ TEST_F(DexFileTest, ZeroLengthDexRejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, GetLocationChecksum) {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 8fdd4706e4..50f56c799a 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -721,14 +721,19 @@ bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx,
return true;
}
-bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
+bool DexFileVerifier::CheckPadding(size_t offset,
+ uint32_t aligned_offset,
+ DexFile::MapItemType type) {
if (offset < aligned_offset) {
if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(uint8_t), "section")) {
return false;
}
while (offset < aligned_offset) {
if (UNLIKELY(*ptr_ != '\0')) {
- ErrorStringPrintf("Non-zero padding %x before section start at %zx", *ptr_, offset);
+ ErrorStringPrintf("Non-zero padding %x before section of type %zu at offset 0x%zx",
+ *ptr_,
+ static_cast<size_t>(type),
+ offset);
return false;
}
ptr_++;
@@ -1615,7 +1620,7 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t section_c
size_t aligned_offset = (offset + alignment_mask) & ~alignment_mask;
// Check the padding between items.
- if (!CheckPadding(offset, aligned_offset)) {
+ if (!CheckPadding(offset, aligned_offset, type)) {
return false;
}
@@ -1837,7 +1842,10 @@ bool DexFileVerifier::CheckIntraDataSection(size_t offset,
size_t next_offset = ptr_ - begin_;
if (next_offset > data_end) {
- ErrorStringPrintf("Out-of-bounds end of data subsection: %zx", next_offset);
+ ErrorStringPrintf("Out-of-bounds end of data subsection: %zu data_off=%u data_size=%u",
+ next_offset,
+ header_->data_off_,
+ header_->data_size_);
return false;
}
@@ -1859,7 +1867,7 @@ bool DexFileVerifier::CheckIntraSection() {
DexFile::MapItemType type = static_cast<DexFile::MapItemType>(item->type_);
// Check for padding and overlap between items.
- if (!CheckPadding(offset, section_offset)) {
+ if (!CheckPadding(offset, section_offset, type)) {
return false;
} else if (UNLIKELY(offset > section_offset)) {
ErrorStringPrintf("Section overlap or out-of-order map: %zx, %x", offset, section_offset);
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 74f82254b3..23089fa215 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -97,7 +97,7 @@ class DexFileVerifier {
const DexFile::ClassDef** class_def);
bool CheckStaticFieldTypes(const DexFile::ClassDef* class_def);
- bool CheckPadding(size_t offset, uint32_t aligned_offset);
+ bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
bool CheckEncodedValue();
bool CheckEncodedArray();
bool CheckEncodedAnnotation();
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 9f3505d3be..ee577e7d9a 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -57,7 +57,7 @@ static void FixUpChecksum(uint8_t* dex_file) {
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) {
- return new StandardDexFile(dex_bytes, length, "tmp", 0, nullptr);
+ return new StandardDexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
}
void VerifyModification(const char* dex_file_base64_content,
@@ -114,7 +114,8 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(location, location, true, error_msg, &tmp);
+ bool success = DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, error_msg, &tmp);
CHECK(success) << *error_msg;
EXPECT_EQ(1U, tmp.size());
std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 99fe53bb71..e64c0f62c2 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -119,6 +119,26 @@ size_t Instruction::SizeInCodeUnitsComplexOpcode() const {
}
}
+size_t Instruction::CodeUnitsRequiredForSizeOfComplexOpcode() const {
+ const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
+ // Handle special NOP encoded variable length sequences.
+ switch (*insns) {
+ case kPackedSwitchSignature:
+ FALLTHROUGH_INTENDED;
+ case kSparseSwitchSignature:
+ return 2;
+ case kArrayDataSignature:
+ return 4;
+ default:
+ if ((*insns & 0xFF) == 0) {
+ return 1; // NOP.
+ } else {
+ LOG(FATAL) << "Unreachable: " << DumpString(nullptr);
+ UNREACHABLE();
+ }
+ }
+}
+
std::string Instruction::DumpHex(size_t code_units) const {
size_t inst_length = SizeInCodeUnits();
if (inst_length > code_units) {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 2f28dffa2b..09c78b2428 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -225,6 +225,12 @@ class Instruction {
}
}
+ // Code units required to calculate the size of the instruction.
+ size_t CodeUnitsRequiredForSizeComputation() const {
+ const int8_t result = kInstructionDescriptors[Opcode()].size_in_code_units;
+ return UNLIKELY(result < 0) ? CodeUnitsRequiredForSizeOfComplexOpcode() : 1;
+ }
+
// Reads an instruction out of the stream at the specified address.
static const Instruction* At(const uint16_t* code) {
DCHECK(code != nullptr);
@@ -638,6 +644,9 @@ class Instruction {
private:
size_t SizeInCodeUnitsComplexOpcode() const;
+ // Return how many code unit words are required to compute the size of the opcode.
+ size_t CodeUnitsRequiredForSizeOfComplexOpcode() const;
+
uint32_t Fetch32(size_t offset) const {
return (Fetch16(offset) | ((uint32_t) Fetch16(offset + 1) << 16));
}
diff --git a/runtime/dex_instruction_iterator.h b/runtime/dex_instruction_iterator.h
index 280746e9dc..f77908cffc 100644
--- a/runtime/dex_instruction_iterator.h
+++ b/runtime/dex_instruction_iterator.h
@@ -24,85 +24,209 @@
namespace art {
-class DexInstructionIterator : public std::iterator<std::forward_iterator_tag, Instruction> {
+class DexInstructionPcPair {
public:
- using value_type = std::iterator<std::forward_iterator_tag, Instruction>::value_type;
- using difference_type = std::iterator<std::forward_iterator_tag, value_type>::difference_type;
-
- DexInstructionIterator() = default;
- DexInstructionIterator(const DexInstructionIterator&) = default;
- DexInstructionIterator(DexInstructionIterator&&) = default;
- DexInstructionIterator& operator=(const DexInstructionIterator&) = default;
- DexInstructionIterator& operator=(DexInstructionIterator&&) = default;
-
- explicit DexInstructionIterator(const value_type* inst) : inst_(inst) {}
- explicit DexInstructionIterator(const uint16_t* inst) : inst_(value_type::At(inst)) {}
+ ALWAYS_INLINE const Instruction& Inst() const {
+ return *Instruction::At(instructions_ + DexPc());
+ }
- // Value after modification.
- DexInstructionIterator& operator++() {
- inst_ = inst_->Next();
- return *this;
+ ALWAYS_INLINE const Instruction* operator->() const {
+ return &Inst();
}
- // Value before modification.
- DexInstructionIterator operator++(int) {
- DexInstructionIterator temp = *this;
- ++*this;
- return temp;
+ ALWAYS_INLINE uint32_t DexPc() const {
+ return dex_pc_;
}
- const value_type& operator*() const {
- return *inst_;
+ ALWAYS_INLINE const uint16_t* Instructions() const {
+ return instructions_;
}
- const value_type* operator->() const {
- return &**this;
+ protected:
+ explicit DexInstructionPcPair(const uint16_t* instructions, uint32_t dex_pc)
+ : instructions_(instructions), dex_pc_(dex_pc) {}
+
+ const uint16_t* instructions_ = nullptr;
+ uint32_t dex_pc_ = 0;
+
+ friend class DexInstructionIteratorBase;
+ friend class DexInstructionIterator;
+ friend class SafeDexInstructionIterator;
+};
+
+// Base helper class to prevent duplicated comparators.
+class DexInstructionIteratorBase : public
+ std::iterator<std::forward_iterator_tag, DexInstructionPcPair> {
+ public:
+ using value_type = std::iterator<std::forward_iterator_tag, DexInstructionPcPair>::value_type;
+ using difference_type = std::iterator<std::forward_iterator_tag, value_type>::difference_type;
+
+ DexInstructionIteratorBase() = default;
+ explicit DexInstructionIteratorBase(const Instruction* inst, uint32_t dex_pc)
+ : data_(reinterpret_cast<const uint16_t*>(inst), dex_pc) {}
+
+ const Instruction& Inst() const {
+ return data_.Inst();
}
// Return the dex pc for an iterator compared to the code item begin.
- uint32_t GetDexPC(const DexInstructionIterator& code_item_begin) {
- return reinterpret_cast<const uint16_t*>(inst_) -
- reinterpret_cast<const uint16_t*>(code_item_begin.inst_);
+ ALWAYS_INLINE uint32_t DexPc() const {
+ return data_.DexPc();
}
- const value_type* Inst() const {
- return inst_;
+ // Instructions from the start of the code item.
+ ALWAYS_INLINE const uint16_t* Instructions() const {
+ return data_.Instructions();
}
- private:
- const value_type* inst_ = nullptr;
+ protected:
+ DexInstructionPcPair data_;
};
-static ALWAYS_INLINE inline bool operator==(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return lhs.Inst() == rhs.Inst();
+static ALWAYS_INLINE inline bool operator==(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ DCHECK_EQ(lhs.Instructions(), rhs.Instructions()) << "Comparing different code items.";
+ return lhs.DexPc() == rhs.DexPc();
}
-static inline bool operator!=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
+static inline bool operator!=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
return !(lhs == rhs);
}
-static inline bool operator<(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
- return lhs.Inst() < rhs.Inst();
+static inline bool operator<(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
+ DCHECK_EQ(lhs.Instructions(), rhs.Instructions()) << "Comparing different code items.";
+ return lhs.DexPc() < rhs.DexPc();
}
-static inline bool operator>(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
+static inline bool operator>(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
return rhs < lhs;
}
-static inline bool operator<=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
+static inline bool operator<=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
return !(rhs < lhs);
}
-static inline bool operator>=(const DexInstructionIterator& lhs,
- const DexInstructionIterator& rhs) {
+static inline bool operator>=(const DexInstructionIteratorBase& lhs,
+ const DexInstructionIteratorBase& rhs) {
return !(lhs < rhs);
}
+// A helper class for a code_item's instructions using range based for loop syntax.
+class DexInstructionIterator : public DexInstructionIteratorBase {
+ public:
+ using DexInstructionIteratorBase::DexInstructionIteratorBase;
+
+ explicit DexInstructionIterator(const uint16_t* inst, uint32_t dex_pc)
+ : DexInstructionIteratorBase(Instruction::At(inst), dex_pc) {}
+
+ // Value after modification.
+ DexInstructionIterator& operator++() {
+ data_.dex_pc_ += Inst().SizeInCodeUnits();
+ return *this;
+ }
+
+ // Value before modification.
+ DexInstructionIterator operator++(int) {
+ DexInstructionIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ const value_type& operator*() const {
+ return data_;
+ }
+
+ const Instruction* operator->() const {
+ return &data_.Inst();
+ }
+
+ // Return the dex pc for the iterator.
+ ALWAYS_INLINE uint32_t DexPc() const {
+ return data_.DexPc();
+ }
+};
+
+// A safe version of DexInstructionIterator that is guaranteed to not go past the end of the code
+// item.
+class SafeDexInstructionIterator : public DexInstructionIteratorBase {
+ public:
+ explicit SafeDexInstructionIterator(const DexInstructionIteratorBase& start,
+ const DexInstructionIteratorBase& end)
+ : DexInstructionIteratorBase(&start.Inst(), start.DexPc())
+ , num_code_units_(end.DexPc()) {
+ DCHECK_EQ(start.Instructions(), end.Instructions())
+ << "start and end must be in the same code item.";
+ }
+
+ // Value after modification, does not read past the end of the allowed region. May increment past
+ // the end of the code item though.
+ SafeDexInstructionIterator& operator++() {
+ AssertValid();
+ const size_t size_code_units = Inst().CodeUnitsRequiredForSizeComputation();
+ const size_t available = NumCodeUnits() - DexPc();
+ if (UNLIKELY(size_code_units > available)) {
+ error_state_ = true;
+ return *this;
+ }
+ const size_t instruction_code_units = Inst().SizeInCodeUnits();
+ if (UNLIKELY(instruction_code_units > available)) {
+ error_state_ = true;
+ return *this;
+ }
+ data_.dex_pc_ += instruction_code_units;
+ return *this;
+ }
+
+ // Value before modification.
+ SafeDexInstructionIterator operator++(int) {
+ SafeDexInstructionIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ const value_type& operator*() const {
+ AssertValid();
+ return data_;
+ }
+
+ const Instruction* operator->() const {
+ AssertValid();
+ return &data_.Inst();
+ }
+
+ // Return the current instruction of the iterator.
+ ALWAYS_INLINE const Instruction& Inst() const {
+ return data_.Inst();
+ }
+
+ const uint16_t* Instructions() const {
+ return data_.Instructions();
+ }
+
+ // Returns true if the iterator is in an error state. This occurs when an instruction couldn't
+ // have its size computed without reading past the end iterator.
+ bool IsErrorState() const {
+ return error_state_;
+ }
+
+ private:
+ ALWAYS_INLINE void AssertValid() const {
+ DCHECK(!IsErrorState());
+ DCHECK_LT(DexPc(), NumCodeUnits());
+ }
+
+ ALWAYS_INLINE uint32_t NumCodeUnits() const {
+ return num_code_units_;
+ }
+
+ const uint32_t num_code_units_ = 0;
+ bool error_state_ = false;
+};
+
} // namespace art
#endif // ART_RUNTIME_DEX_INSTRUCTION_ITERATOR_H_
diff --git a/runtime/dex_instruction_test.cc b/runtime/dex_instruction_test.cc
index 48ed027882..c944085b9e 100644
--- a/runtime/dex_instruction_test.cc
+++ b/runtime/dex_instruction_test.cc
@@ -74,7 +74,7 @@ TEST(Instruction, PropertiesOf45cc) {
Build45cc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
0xcafe /* arg_regs */, instruction);
- DexInstructionIterator ins(instruction);
+ DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
@@ -109,7 +109,7 @@ TEST(Instruction, PropertiesOf4rcc) {
Build4rcc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
0xcafe /* arg_regs */, instruction);
- DexInstructionIterator ins(instruction);
+ DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
@@ -155,7 +155,7 @@ static std::string DumpInst35c(Instruction::Code code,
std::vector<uint16_t> args) {
uint16_t inst[6] = {};
Build35c(inst, code, method_idx, args);
- return DexInstructionIterator(inst)->DumpString(nullptr);
+ return Instruction::At(inst)->DumpString(nullptr);
}
TEST(Instruction, DumpString) {
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 69e3fc1045..ef27ca325c 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -68,28 +68,50 @@ class ScopedQuickEntrypointChecks {
};
static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
- // constexpr must be a return statement.
- return (isa == kArm || isa == kThumb2) ? arm::ArmCalleeSaveFrameSize(type) :
- isa == kArm64 ? arm64::Arm64CalleeSaveFrameSize(type) :
- isa == kMips ? mips::MipsCalleeSaveFrameSize(type) :
- isa == kMips64 ? mips64::Mips64CalleeSaveFrameSize(type) :
- isa == kX86 ? x86::X86CalleeSaveFrameSize(type) :
- isa == kX86_64 ? x86_64::X86_64CalleeSaveFrameSize(type) :
- isa == kNone ? (LOG(FATAL) << "kNone has no frame size", 0) :
- (LOG(FATAL) << "Unknown instruction set" << isa, 0);
+ switch (isa) {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ return arm::ArmCalleeSaveFrameSize(type);
+ case InstructionSet::kArm64:
+ return arm64::Arm64CalleeSaveFrameSize(type);
+ case InstructionSet::kMips:
+ return mips::MipsCalleeSaveFrameSize(type);
+ case InstructionSet::kMips64:
+ return mips64::Mips64CalleeSaveFrameSize(type);
+ case InstructionSet::kX86:
+ return x86::X86CalleeSaveFrameSize(type);
+ case InstructionSet::kX86_64:
+ return x86_64::X86_64CalleeSaveFrameSize(type);
+ case InstructionSet::kNone:
+ LOG(FATAL) << "kNone has no frame size";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
- // constexpr must be a return statement.
- return (isa == kArm || isa == kThumb2) ? kArmPointerSize :
- isa == kArm64 ? kArm64PointerSize :
- isa == kMips ? kMipsPointerSize :
- isa == kMips64 ? kMips64PointerSize :
- isa == kX86 ? kX86PointerSize :
- isa == kX86_64 ? kX86_64PointerSize :
- isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", PointerSize::k32) :
- (LOG(FATAL) << "Unknown instruction set" << isa, PointerSize::k32);
+ switch (isa) {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ return kArmPointerSize;
+ case InstructionSet::kArm64:
+ return kArm64PointerSize;
+ case InstructionSet::kMips:
+ return kMipsPointerSize;
+ case InstructionSet::kMips64:
+ return kMips64PointerSize;
+ case InstructionSet::kX86:
+ return kX86PointerSize;
+ case InstructionSet::kX86_64:
+ return kX86_64PointerSize;
+ case InstructionSet::kNone:
+ LOG(FATAL) << "kNone has no pointer size";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 6bb67a3727..a8d2a34853 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -232,7 +232,7 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
}
switch (return_shorty_char) {
case 'F': {
- if (kRuntimeISA == kX86) {
+ if (kRuntimeISA == InstructionSet::kX86) {
// Convert back the result to float.
double d = bit_cast<double, uint64_t>(result_f);
return bit_cast<uint32_t, float>(static_cast<float>(d));
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index cf5cc111b7..a4a8c349a3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -961,9 +961,13 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
self->EndAssertNoThreadSuspension(old_cause);
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
- interface_method));
+ ObjPtr<mirror::Method> interface_reflect_method =
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), interface_method);
+ if (interface_reflect_method == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return 0;
+ }
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);
// All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
// that performs allocations.
@@ -2574,7 +2578,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
// each type.
extern "C" uintptr_t artInvokePolymorphic(
JValue* result,
- mirror::Object* raw_method_handle,
+ mirror::Object* raw_receiver,
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2602,26 +2606,29 @@ extern "C" uintptr_t artInvokePolymorphic(
RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
gc_visitor.VisitArguments();
- // Wrap raw_method_handle in a Handle for safety.
- StackHandleScope<2> hs(self);
- Handle<mirror::MethodHandle> method_handle(
- hs.NewHandle(ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(raw_method_handle))));
- raw_method_handle = nullptr;
+ // Wrap raw_receiver in a Handle for safety.
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
+ raw_receiver = nullptr;
self->EndAssertNoThreadSuspension(old_cause);
- // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
+ // Resolve method.
ClassLinker* linker = Runtime::Current()->GetClassLinker();
ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
self, inst.VRegB(), caller_method, kVirtual);
- DCHECK((resolved_method ==
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
- (resolved_method ==
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke)));
- if (UNLIKELY(method_handle.IsNull())) {
+
+ if (UNLIKELY(receiver_handle.IsNull())) {
ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
return static_cast<uintptr_t>('V');
}
+ // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996).
+ DCHECK_EQ(resolved_method->GetDeclaringClass(),
+ WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle));
+
+ Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
+ ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+
Handle<mirror::MethodType> method_type(
hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
@@ -2662,16 +2669,28 @@ extern "C" uintptr_t artInvokePolymorphic(
// consecutive order.
uint32_t unused_args[Instruction::kMaxVarArgRegs] = {};
uint32_t first_callee_arg = first_arg + 1;
- if (!DoInvokePolymorphic<true /* is_range */>(self,
- resolved_method,
- *shadow_frame,
- method_handle,
- method_type,
- unused_args,
- first_callee_arg,
- result)) {
- DCHECK(self->IsExceptionPending());
- }
+
+ bool isExact = (jni::EncodeArtMethod(resolved_method) ==
+ WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
+ bool success = false;
+ if (isExact) {
+ success = MethodHandleInvokeExact<true/*is_range*/>(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result);
+ } else {
+ success = MethodHandleInvoke<true/*is_range*/>(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result);
+ }
+ DCHECK(success || self->IsExceptionPending());
// Pop transition record.
self->PopManagedStackFragment(fragment);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index b692618740..77b3132bdc 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -103,23 +103,29 @@ TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
GetCalleeSaveFrameSize( \
isa, CalleeSaveType::kSaveEverythingForSuspendCheck))
- CHECK_FRAME_SIZE(kArm);
- CHECK_FRAME_SIZE(kArm64);
- CHECK_FRAME_SIZE(kMips);
- CHECK_FRAME_SIZE(kMips64);
- CHECK_FRAME_SIZE(kX86);
- CHECK_FRAME_SIZE(kX86_64);
+ CHECK_FRAME_SIZE(InstructionSet::kArm);
+ CHECK_FRAME_SIZE(InstructionSet::kArm64);
+ CHECK_FRAME_SIZE(InstructionSet::kMips);
+ CHECK_FRAME_SIZE(InstructionSet::kMips64);
+ CHECK_FRAME_SIZE(InstructionSet::kX86);
+ CHECK_FRAME_SIZE(InstructionSet::kX86_64);
}
// This test ensures that GetConstExprPointerSize is correct with respect to
// GetInstructionSetPointerSize.
TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
- EXPECT_EQ(GetInstructionSetPointerSize(kArm), GetConstExprPointerSize(kArm));
- EXPECT_EQ(GetInstructionSetPointerSize(kArm64), GetConstExprPointerSize(kArm64));
- EXPECT_EQ(GetInstructionSetPointerSize(kMips), GetConstExprPointerSize(kMips));
- EXPECT_EQ(GetInstructionSetPointerSize(kMips64), GetConstExprPointerSize(kMips64));
- EXPECT_EQ(GetInstructionSetPointerSize(kX86), GetConstExprPointerSize(kX86));
- EXPECT_EQ(GetInstructionSetPointerSize(kX86_64), GetConstExprPointerSize(kX86_64));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm),
+ GetConstExprPointerSize(InstructionSet::kArm));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm64),
+ GetConstExprPointerSize(InstructionSet::kArm64));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips),
+ GetConstExprPointerSize(InstructionSet::kMips));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips64),
+ GetConstExprPointerSize(InstructionSet::kMips64));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86),
+ GetConstExprPointerSize(InstructionSet::kX86));
+ EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86_64),
+ GetConstExprPointerSize(InstructionSet::kX86_64));
}
// This test ensures that the constexpr specialization of the return PC offset computation in
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 52b355dedd..7beff960cc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -2153,14 +2154,18 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by
mirror::Class* int_array_class = down_cast<mirror::Class*>(
Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
CHECK(int_array_class != nullptr);
- AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
+ if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
+ }
size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
CHECK_EQ(component_size, sizeof(int32_t));
size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
if (data_offset > byte_size) {
// An int array is too big. Use java.lang.Object.
CHECK(java_lang_Object_ != nullptr);
- AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
+ if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
+ }
CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
dummy_obj->SetClass(java_lang_Object_);
CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2dc5acc14b..c6caf4b08e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -24,6 +24,7 @@
#include "base/bounded_fifo.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 67e8a0d02f..9f6266612a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -28,6 +28,7 @@
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
@@ -77,7 +78,7 @@
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "os.h"
#include "reflection.h"
@@ -93,6 +94,9 @@ namespace gc {
static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
+
+DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
+
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
@@ -889,7 +893,9 @@ void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_p
// the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
// transition the collector.
RequestCollectorTransition(background_collector_type_,
- kIsDebugBuild ? 0 : kCollectorTransitionWait);
+ kStressCollectorTransition
+ ? 0
+ : kCollectorTransitionWait);
}
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d673b4ac29..7b4fab607f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
@@ -155,6 +156,9 @@ class Heap {
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
// How long we wait after a transition request to perform a collector transition (nanoseconds).
static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
+ // Whether the transition-wait applies or not. Zero wait will stress the
+ // transition code and collector, but increases jank probability.
+ DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 42b31ab140..d58d09c794 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -22,7 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index f0eada3cb4..74813b4dd1 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -30,6 +30,7 @@
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/macros.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index 5999548d2b..a0ecb95ac7 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -22,6 +22,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
@@ -88,7 +89,7 @@ static void DeleteDirectoryContents(const std::string& dir, bool recurse) {
// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
// Note this should only be used during first boot.
static void PruneDalvikCache(InstructionSet isa) {
- CHECK_NE(isa, kNone);
+ CHECK_NE(isa, InstructionSet::kNone);
// Prune the base /data/dalvik-cache.
// Note: GetDalvikCache may return the empty string if the directory doesn't
// exist. It is safe to pass "" to DeleteDirectoryContents, so this is okay.
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index beb43dfcf5..3cd04a61e9 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include "art_field-inl.h"
+#include "base/file_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-refvisitor-inl.h"
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 071d1aedb7..e1582120dc 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -39,23 +39,21 @@ DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OF
#define RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET 0x28
DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))))
#define THREAD_FLAGS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_ID_OFFSET 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_IS_GC_MARKING_OFFSET 52
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_CARD_TABLE_OFFSET 136
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
-#define CODEITEM_INSNS_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(CODEITEM_INSNS_OFFSET), (static_cast<int32_t>(__builtin_offsetof(art::DexFile::CodeItem, insns_))))
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
#define MIRROR_CLASS_DEX_CACHE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class:: DexCacheOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class::DexCacheOffset().Int32Value())))
#define MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET 48
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache:: ResolvedMethodsOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())))
#define MIRROR_OBJECT_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object:: ClassOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object::ClassOffset().Int32Value())))
#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object:: MonitorOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object::MonitorOffset().Int32Value())))
#define MIRROR_CLASS_STATUS_INITIALIZED 0xb
DEFINE_CHECK_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED), (static_cast<uint32_t>((art::mirror::Class::kStatusInitialized))))
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
@@ -65,17 +63,17 @@ DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_INTERFACE), (static_
#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
#define ART_METHOD_JNI_OFFSET_32 20
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_JNI_OFFSET_64 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_32 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
#define ART_METHOD_DECLARING_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: DeclaringClassOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod::DeclaringClassOffset().Int32Value())))
#define ART_METHOD_ACCESS_FLAGS_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: AccessFlagsOffset().Int32Value())))
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod::AccessFlagsOffset().Int32Value())))
#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2c82cb1acd..49f202182d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -137,11 +137,12 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
- return Dbg::IsDebuggerActive() &&
- Runtime::Current()->IsJavaDebuggable() &&
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
- !method->IsProxyMethod();
+ !method->IsProxyMethod() &&
+ Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
}
void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 68a75b0196..01b7d4e457 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -18,6 +18,7 @@
#include <limits>
+#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex_file_types.h"
#include "interpreter_common.h"
@@ -28,7 +29,7 @@
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "mterp/mterp.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
@@ -287,11 +288,12 @@ static inline JValue Execute(
}
}
- shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ ArtMethod* method = shadow_frame.GetMethod();
+
+ DCheckStaticState(self, method);
// Lock counting is a special version of accessibility checks, and for simplicity and
// reduction of template parameters, we gate it behind access-checks mode.
- ArtMethod* method = shadow_frame.GetMethod();
DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());
bool transaction_active = Runtime::Current()->IsActiveTransaction();
@@ -312,7 +314,10 @@ static inline JValue Execute(
return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
false);
}
- bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register);
+ bool returned = ExecuteMterpImpl(self,
+ code_item->insns_,
+ &shadow_frame,
+ &result_register);
if (returned) {
return result_register;
} else {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 9fb9fe7274..0a1ae36167 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -22,6 +22,7 @@
#include "debugger.h"
#include "dex_file_types.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "intrinsics_enum.h"
#include "jit/jit.h"
#include "jvalue.h"
#include "method_handles-inl.h"
@@ -588,11 +589,12 @@ void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
}
template<bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result)
+static bool DoMethodHandleInvokeCommon(Thread* self,
+ ShadowFrame& shadow_frame,
+ bool invoke_exact,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure to check for async exceptions
if (UNLIKELY(self->ObserveAsyncException())) {
@@ -638,41 +640,381 @@ bool DoInvokePolymorphic(Thread* self,
return false;
}
- ArtMethod* invoke_method =
- class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
- self, invoke_method_idx, shadow_frame.GetMethod(), kVirtual);
-
// There is a common dispatch method for method handles that takes
// arguments either from a range or an array of arguments depending
// on whether the DEX instruction is invoke-polymorphic/range or
// invoke-polymorphic. The array here is for the latter.
uint32_t args[Instruction::kMaxVarArgRegs] = {};
- if (is_range) {
+ if (UNLIKELY(is_range)) {
// VRegC is the register holding the method handle. Arguments passed
// to the method handle's target do not include the method handle.
uint32_t first_arg = inst->VRegC_4rcc() + 1;
- return DoInvokePolymorphic<is_range>(self,
- invoke_method,
- shadow_frame,
- method_handle,
- callsite_type,
- args /* unused */,
- first_arg,
- result);
+ static const bool kIsRange = true;
+ if (invoke_exact) {
+ return art::MethodHandleInvokeExact<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args /* unused */,
+ first_arg,
+ result);
+ } else {
+ return art::MethodHandleInvoke<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args /* unused */,
+ first_arg,
+ result);
+ }
} else {
// Get the register arguments for the invoke.
inst->GetVarArgs(args, inst_data);
// Drop the first register which is the method handle performing the invoke.
memmove(args, args + 1, sizeof(args[0]) * (Instruction::kMaxVarArgRegs - 1));
args[Instruction::kMaxVarArgRegs - 1] = 0;
- return DoInvokePolymorphic<is_range>(self,
- invoke_method,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- args[0],
- result);
+ static const bool kIsRange = false;
+ if (invoke_exact) {
+ return art::MethodHandleInvokeExact<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ args[0],
+ result);
+ } else {
+ return art::MethodHandleInvoke<kIsRange>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ args[0],
+ result);
+ }
+ }
+}
+
+bool DoMethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
+ static const bool kIsRange = false;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
+ static const bool kIsRange = true;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+ }
+}
+
+bool DoMethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
+ static const bool kIsRange = false;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
+ static const bool kIsRange = true;
+ return DoMethodHandleInvokeCommon<kIsRange>(
+ self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+ }
+}
+
+static bool UnimplementedSignaturePolymorphicMethod(Thread* self ATTRIBUTE_UNUSED,
+ ShadowFrame& shadow_frame ATTRIBUTE_UNUSED,
+ const Instruction* inst ATTRIBUTE_UNUSED,
+ uint16_t inst_data ATTRIBUTE_UNUSED,
+ JValue* result ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ UNIMPLEMENTED(FATAL) << "TODO(oth): b/65872996";
+ return false;
+}
+
+bool DoVarHandleCompareAndExchange(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndExchangeAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndExchangeRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleCompareAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAdd(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAddAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndAddRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAnd(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAndAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseAndRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOr(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOrAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseOrRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXor(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXorAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndBitwiseXorRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetAndSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetOpaque(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleGetVolatile(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetOpaque(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleSetVolatile(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSet(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetAcquire(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetPlain(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+bool DoVarHandleWeakCompareAndSetRelease(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return UnimplementedSignaturePolymorphicMethod(self, shadow_frame, inst, inst_data, result);
+}
+
+template<bool is_range>
+bool DoInvokePolymorphic(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) {
+ const int invoke_method_idx = inst->VRegB();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtMethod* invoke_method =
+ class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+ self, invoke_method_idx, shadow_frame.GetMethod(), kVirtual);
+
+ // Ensure intrinsic identifiers are initialized.
+ DCHECK(invoke_method->IsIntrinsic());
+
+ // Dispatch based on intrinsic identifier associated with method.
+ switch (static_cast<art::Intrinsics>(invoke_method->GetIntrinsic())) {
+#define CASE_SIGNATURE_POLYMORPHIC_INTRINSIC(Name, ...) \
+ case Intrinsics::k##Name: \
+ return Do ## Name(self, shadow_frame, inst, inst_data, result);
+#include "intrinsics_list.h"
+ SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(CASE_SIGNATURE_POLYMORPHIC_INTRINSIC)
+#undef INTRINSICS_LIST
+#undef SIGNATURE_POLYMORPHIC_INTRINSICS_LIST
+#undef CASE_SIGNATURE_POLYMORPHIC_INTRINSIC
+ default:
+ LOG(FATAL) << "Unreachable: " << invoke_method->GetIntrinsic();
+ UNREACHABLE();
+ return false;
}
}
@@ -839,19 +1181,16 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Invoke the bootstrap method handle.
JValue result;
- // This array of arguments is unused. DoInvokePolymorphic() operates on either a
+ // This array of arguments is unused. DoMethodHandleInvokeExact() operates on either a
// an argument array or a range, but always takes an array argument.
uint32_t args_unused[Instruction::kMaxVarArgRegs];
- ArtMethod* invoke_exact =
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
- bool invoke_success = DoInvokePolymorphic<true /* is_range */>(self,
- invoke_exact,
- *bootstrap_frame,
- bootstrap,
- bootstrap_method_type,
- args_unused,
- 0,
- &result);
+ bool invoke_success = art::MethodHandleInvokeExact<true /* is_range */>(self,
+ *bootstrap_frame,
+ bootstrap,
+ bootstrap_method_type,
+ args_unused,
+ 0,
+ &result);
if (!invoke_success) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -942,16 +1281,13 @@ bool DoInvokeCustom(Thread* self,
inst->GetVarArgs(args, inst_data);
}
- ArtMethod* invoke_exact =
- jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
- return DoInvokePolymorphic<is_range>(self,
- invoke_exact,
- shadow_frame,
- target,
- target_method_type,
- args,
- args[0],
- result);
+ return art::MethodHandleInvokeExact<is_range>(self,
+ shadow_frame,
+ target,
+ target_method_type,
+ args,
+ args[0],
+ result);
}
template <bool is_range>
@@ -1344,16 +1680,6 @@ EXPLICIT_DO_CALL_TEMPLATE_DECL(true, false);
EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
-// Explicit DoInvokeCustom template function declarations.
-#define EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(_is_range) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokeCustom<_is_range>( \
- Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data, JValue* result)
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(false);
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(true);
-#undef EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL
-
// Explicit DoInvokePolymorphic template function declarations.
#define EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
@@ -1364,6 +1690,16 @@ EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(false);
EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
+// Explicit DoInvokeCustom template function declarations.
+#define EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(_is_range) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoInvokeCustom<_is_range>( \
+ Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
+ uint16_t inst_data, JValue* result)
+EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(false);
+EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(true);
+#undef EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL
+
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index e7f67ebb0d..f097bc71b9 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -222,7 +222,18 @@ static inline mirror::MethodType* ResolveMethodType(Thread* self,
return class_linker->ResolveMethodType(self, method_type_index, referrer);
}
-// Performs a signature polymorphic invoke (invoke-polymorphic/invoke-polymorphic-range).
+#define DECLARE_SIGNATURE_POLYMORPHIC_HANDLER(Name, ...) \
+bool Do ## Name(Thread* self, \
+ ShadowFrame& shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
+#include "intrinsics_list.h"
+INTRINSICS_LIST(DECLARE_SIGNATURE_POLYMORPHIC_HANDLER)
+#undef INTRINSICS_LIST
+#undef DECLARE_SIGNATURE_POLYMORPHIC_HANDLER
+
+// Performs a invoke-polymorphic or invoke-polymorphic-range.
template<bool is_range>
bool DoInvokePolymorphic(Thread* self,
ShadowFrame& shadow_frame,
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 26de6b4ff7..37593bc728 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -16,8 +16,8 @@
#include "interpreter/interpreter_intrinsics.h"
-#include "compiler/intrinsics_enum.h"
#include "dex_instruction.h"
+#include "intrinsics_enum.h"
#include "interpreter/interpreter_common.h"
namespace art {
@@ -323,14 +323,14 @@ static ALWAYS_INLINE bool MterpStringEquals(ShadowFrame* shadow_frame,
return true;
}
-#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
-static ALWAYS_INLINE bool name(ShadowFrame* /* shadow_frame */, \
- const Instruction* /* inst */, \
- uint16_t /* inst_data */, \
- JValue* /* result_register */) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- std::atomic_thread_fence(std_memory_operation); \
- return true; \
+#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, \
+ const Instruction* inst ATTRIBUTE_UNUSED, \
+ uint16_t inst_data ATTRIBUTE_UNUSED, \
+ JValue* result_register ATTRIBUTE_UNUSED) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ std::atomic_thread_fence(std_memory_operation); \
+ return true; \
}
// The VarHandle fence methods are static (unlike sun.misc.Unsafe versions).
@@ -342,6 +342,63 @@ VARHANDLE_FENCE_INTRINSIC(MterpVarHandleReleaseFence, std::memory_order_release)
VARHANDLE_FENCE_INTRINSIC(MterpVarHandleLoadLoadFence, std::memory_order_acquire)
VARHANDLE_FENCE_INTRINSIC(MterpVarHandleStoreStoreFence, std::memory_order_release)
+#define METHOD_HANDLE_INVOKE_INTRINSIC(name) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { \
+ return DoInvokePolymorphic<false>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+ } else { \
+ return DoInvokePolymorphic<true>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+ } \
+}
+
+METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvokeExact)
+METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvoke)
+
+#define VAR_HANDLE_ACCESSOR_INTRINSIC(name) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return Do##name(Thread::Current(), *shadow_frame, inst, inst_data, result); \
+}
+
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchange)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGet);
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAdd)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAnd)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOr)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXor)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetOpaque)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetVolatile)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetOpaque)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetRelease)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetVolatile)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSet)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetAcquire)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetPlain)
+VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetRelease)
+
// Macro to help keep track of what's left to implement.
#define UNIMPLEMENTED_CASE(name) \
case Intrinsics::k##name: \
@@ -494,6 +551,39 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
INTRINSIC_CASE(VarHandleReleaseFence)
INTRINSIC_CASE(VarHandleLoadLoadFence)
INTRINSIC_CASE(VarHandleStoreStoreFence)
+ INTRINSIC_CASE(MethodHandleInvokeExact)
+ INTRINSIC_CASE(MethodHandleInvoke)
+ INTRINSIC_CASE(VarHandleCompareAndExchange)
+ INTRINSIC_CASE(VarHandleCompareAndExchangeAcquire)
+ INTRINSIC_CASE(VarHandleCompareAndExchangeRelease)
+ INTRINSIC_CASE(VarHandleCompareAndSet)
+ INTRINSIC_CASE(VarHandleGet)
+ INTRINSIC_CASE(VarHandleGetAcquire)
+ INTRINSIC_CASE(VarHandleGetAndAdd)
+ INTRINSIC_CASE(VarHandleGetAndAddAcquire)
+ INTRINSIC_CASE(VarHandleGetAndAddRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAnd)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAndAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseAndRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOr)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOrAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseOrRelease)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXor)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXorAcquire)
+ INTRINSIC_CASE(VarHandleGetAndBitwiseXorRelease)
+ INTRINSIC_CASE(VarHandleGetAndSet)
+ INTRINSIC_CASE(VarHandleGetAndSetAcquire)
+ INTRINSIC_CASE(VarHandleGetAndSetRelease)
+ INTRINSIC_CASE(VarHandleGetOpaque)
+ INTRINSIC_CASE(VarHandleGetVolatile)
+ INTRINSIC_CASE(VarHandleSet)
+ INTRINSIC_CASE(VarHandleSetOpaque)
+ INTRINSIC_CASE(VarHandleSetRelease)
+ INTRINSIC_CASE(VarHandleSetVolatile)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSet)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetAcquire)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetPlain)
+ INTRINSIC_CASE(VarHandleWeakCompareAndSetRelease)
case Intrinsics::kNone:
res = false;
break;
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 1be20fab25..7aa5a34bd4 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -32,7 +32,7 @@ namespace interpreter {
// Mterp does not support transactions or access check, thus no templated versions.
extern "C" bool ExecuteMterpImpl(Thread* self,
- const DexFile::CodeItem* code_item,
+ const uint16_t* dex_instructions,
ShadowFrame* shadow_frame,
JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
index ce14b548d0..de617a90d7 100644
--- a/runtime/interpreter/mterp/arm/entry.S
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -19,8 +19,6 @@
.text
.align 2
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
/*
* On entry:
@@ -48,8 +46,8 @@ ENTRY ExecuteMterpImpl
/* Remember the return register */
str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
- /* Remember the code_item */
- str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+ /* Remember the dex instruction pointer */
+ str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
/* set up "named" registers */
mov rSELF, r0
@@ -57,8 +55,7 @@ ENTRY ExecuteMterpImpl
add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
- add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
index c6801e5dda..f3a3ad25fc 100644
--- a/runtime/interpreter/mterp/arm/footer.S
+++ b/runtime/interpreter/mterp/arm/footer.S
@@ -97,11 +97,10 @@ MterpException:
bl MterpHandleException @ (self, shadow_frame)
cmp r0, #0
beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
ldr r1, [rFP, #OFF_FP_DEX_PC]
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, #CODEITEM_INSNS_OFFSET
- add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cmp r0, #0
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
index 597d9d46a5..51c2ba4c03 100644
--- a/runtime/interpreter/mterp/arm/header.S
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -110,7 +110,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
@@ -130,9 +130,8 @@ unspecified registers or condition codes.
.endm
.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
+ ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- add \tmp, #CODEITEM_INSNS_OFFSET
sub \tmp, rPC, \tmp
asr \tmp, #1
str \tmp, [rFP, #OFF_FP_DEX_PC]
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
index 853a7a4e79..66ec950531 100644
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -10,7 +10,6 @@
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
@@ -18,6 +17,7 @@
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm64/close_cfi.S b/runtime/interpreter/mterp/arm64/close_cfi.S
new file mode 100644
index 0000000000..7ba0486079
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/close_cfi.S
@@ -0,0 +1,4 @@
+// Close out the cfi info. We're treating mterp as a single function.
+
+END ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
index 73c5a88e5f..f3d40ff6f7 100644
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ b/runtime/interpreter/mterp/arm64/entry.S
@@ -25,12 +25,7 @@
* x3 JValue* result_register
*
*/
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-
-ExecuteMterpImpl:
- .cfi_startproc
+ENTRY ExecuteMterpImpl
SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
SAVE_TWO_REGS xIBASE, xREFS, 16
SAVE_TWO_REGS xSELF, xINST, 32
@@ -41,8 +36,8 @@ ExecuteMterpImpl:
/* Remember the return register */
str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
- /* Remember the code_item */
- str x1, [x2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+ /* Remember the dex instruction pointer */
+ str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
/* set up "named" registers */
mov xSELF, x0
@@ -50,8 +45,7 @@ ExecuteMterpImpl:
add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
- add xPC, xPC, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
index 388fc8db74..0ce3543911 100644
--- a/runtime/interpreter/mterp/arm64/footer.S
+++ b/runtime/interpreter/mterp/arm64/footer.S
@@ -93,11 +93,10 @@ MterpException:
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpHandleException // (self, shadow_frame)
cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_CODE_ITEM]
+ ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
ldr w1, [xFP, #OFF_FP_DEX_PC]
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, #CODEITEM_INSNS_OFFSET
- add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
+ add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cbnz w0, MterpFallback
@@ -305,6 +304,3 @@ MterpProfileActive:
RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
ret
- .cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
index 7125d5a74d..47f12d2f5d 100644
--- a/runtime/interpreter/mterp/arm64/header.S
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -116,7 +116,7 @@ codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
@@ -330,3 +330,20 @@ codes.
.cfi_restore \reg2
.cfi_adjust_cfa_offset -(\frame_adjustment)
.endm
+
+/*
+ * cfi support macros.
+ */
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 0831c3b4bd..590363f6e4 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -301,3 +301,6 @@ asm-alt-stub arm64/alt_stub.S
# emit alternate entry stubs
alt-ops
+
+# finish by closing .cfi info
+import arm64/close_cfi.S
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index 5839b5fc97..1c9af30d0a 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -284,7 +284,6 @@ def loadAndEmitOpcodes():
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
asm_fp.write(" .text\n\n")
@@ -310,7 +309,6 @@ def loadAndEmitOpcodes():
asm_fp.write(label_prefix + "_op_nop: /* dummy */\n");
emitAlign()
- asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
@@ -319,12 +317,10 @@ def loadAndEmitOpcodes():
end_sister_label = global_name_format % "artMterpAsmSisterEnd"
emitSectionComment("Sister implementations", asm_fp)
asm_fp.write(" .global %s\n" % start_sister_label)
- asm_fp.write(" " + (function_type_format % start_sister_label) + "\n");
asm_fp.write(" .text\n")
asm_fp.write(" .balign 4\n")
asm_fp.write("%s:\n" % start_sister_label)
asm_fp.writelines(sister_list)
- asm_fp.write("\n " + (function_size_format % (start_sister_label, start_sister_label)) + "\n")
asm_fp.write(" .global %s\n" % end_sister_label)
asm_fp.write("%s:\n\n" % end_sister_label)
@@ -351,7 +347,6 @@ def loadAndEmitAltOpcodes():
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write(" .text\n\n")
asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
@@ -364,7 +359,6 @@ def loadAndEmitAltOpcodes():
loadAndEmitAltStub(source, i)
emitAlign()
- asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
index f617a4d08b..03de985cd0 100644
--- a/runtime/interpreter/mterp/mips/entry.S
+++ b/runtime/interpreter/mterp/mips/entry.S
@@ -25,7 +25,7 @@
/*
* On entry:
* a0 Thread* self
- * a1 code_item
+ * a1 dex_instructions
* a2 ShadowFrame
* a3 JValue* result_register
*
@@ -43,8 +43,8 @@ ExecuteMterpImpl:
/* Remember the return register */
sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
- /* Remember the code_item */
- sw a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+ /* Remember the dex instruction pointer */
+ sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
@@ -52,8 +52,7 @@ ExecuteMterpImpl:
addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
- addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
- EAS1(rPC, rPC, a0) # Create direct pointer to 1st dex opcode
+ EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
EXPORT_PC()
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
index 9909dfeb47..6e1ba1c882 100644
--- a/runtime/interpreter/mterp/mips/footer.S
+++ b/runtime/interpreter/mterp/mips/footer.S
@@ -95,12 +95,10 @@ MterpException:
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpHandleException) # (self, shadow_frame)
beqz v0, MterpExceptionReturn # no local catch, back to caller.
- lw a0, OFF_FP_CODE_ITEM(rFP)
+ lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
lw a1, OFF_FP_DEX_PC(rFP)
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
- addu rPC, a0, CODEITEM_INSNS_OFFSET
- sll a1, a1, 1
- addu rPC, rPC, a1 # generate new dex_pc_ptr
+ EAS1(rPC, a0, a1) # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
JAL(MterpShouldSwitchInterpreters)
bnez v0, MterpFallback
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
index 0ce7745120..e4552ddf3d 100644
--- a/runtime/interpreter/mterp/mips/header.S
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -216,7 +216,7 @@
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
#define MTERP_PROFILE_BRANCHES 1
@@ -238,9 +238,8 @@
sw rPC, OFF_FP_DEX_PC_PTR(rFP)
#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_CODE_ITEM(rFP); \
+ lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
- addu tmp, CODEITEM_INSNS_OFFSET; \
subu tmp, rPC, tmp; \
sra tmp, tmp, 1; \
sw tmp, OFF_FP_DEX_PC(rFP)
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
index 5536966be2..436b88dbd0 100644
--- a/runtime/interpreter/mterp/mips64/entry.S
+++ b/runtime/interpreter/mterp/mips64/entry.S
@@ -27,7 +27,7 @@
/*
* On entry:
* a0 Thread* self
- * a1 code_item
+ * a1 dex_instructions
* a2 ShadowFrame
* a3 JValue* result_register
*
@@ -63,17 +63,16 @@ ExecuteMterpImpl:
/* Remember the return register */
sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
- /* Remember the code_item */
- sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+ /* Remember the dex instruction pointer */
+ sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
dlsa rREFS, v0, rFP, 2
- daddu rPC, a1, CODEITEM_INSNS_OFFSET
lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
- dlsa rPC, v0, rPC, 1
+ dlsa rPC, v0, a1, 1
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
index 312fa9c009..779b1fb88f 100644
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -55,11 +55,10 @@ MterpException:
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpHandleException # (self, shadow_frame)
beqzc v0, MterpExceptionReturn # no local catch, back to caller.
- ld a0, OFF_FP_CODE_ITEM(rFP)
+ ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
lwu a1, OFF_FP_DEX_PC(rFP)
REFRESH_IBASE
- daddu rPC, a0, CODEITEM_INSNS_OFFSET
- dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr
+ dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
jal MterpShouldSwitchInterpreters
bnezc v0, MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
index 264c411a55..d1acefd338 100644
--- a/runtime/interpreter/mterp/mips64/header.S
+++ b/runtime/interpreter/mterp/mips64/header.S
@@ -114,7 +114,7 @@ The following registers have fixed assignments:
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
#define MTERP_PROFILE_BRANCHES 1
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 404c2609e8..92dd19ed2f 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -577,7 +577,7 @@ extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16
self->AssertNoPendingException();
}
if (kTraceExecutionEnabled) {
- uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetCodeItem()->insns_;
+ uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
TraceExecution(*shadow_frame, inst, dex_pc);
}
if (kTestExportPC) {
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
index 35f8f1c7e7..e515ec4471 100644
--- a/runtime/interpreter/mterp/mterp_stub.cc
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -38,8 +38,10 @@ void InitMterpTls(Thread* self) {
/*
* The platform-specific implementation must provide this.
*/
-extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result_register)
+extern "C" bool ExecuteMterpImpl(Thread* self,
+ const uint16_t* dex_instructions,
+ ShadowFrame* shadow_frame,
+ JValue* result_register)
REQUIRES_SHARED(Locks::mutator_lock_) {
 UNUSED(self); UNUSED(shadow_frame); UNUSED(dex_instructions); UNUSED(result_register);
UNIMPLEMENTED(art::FATAL);
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 393a9cc112..69d7edbe8a 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -117,7 +117,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
@@ -137,9 +137,8 @@ unspecified registers or condition codes.
.endm
.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
+ ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- add \tmp, #CODEITEM_INSNS_OFFSET
sub \tmp, rPC, \tmp
asr \tmp, #1
str \tmp, [rFP, #OFF_FP_DEX_PC]
@@ -338,8 +337,6 @@ unspecified registers or condition codes.
.text
.align 2
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
/*
* On entry:
@@ -367,8 +364,8 @@ ENTRY ExecuteMterpImpl
/* Remember the return register */
str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
- /* Remember the code_item */
- str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+ /* Remember the dex instruction pointer */
+ str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
/* set up "named" registers */
mov rSELF, r0
@@ -376,8 +373,7 @@ ENTRY ExecuteMterpImpl
add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
- add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
@@ -398,7 +394,6 @@ ENTRY ExecuteMterpImpl
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -1839,7 +1834,6 @@ artMterpAsmInstructionStart = .L_op_nop
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
@@ -1847,6 +1841,7 @@ artMterpAsmInstructionStart = .L_op_nop
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -7510,7 +7505,6 @@ constvalop_long_to_double:
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7520,7 +7514,6 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
@@ -7586,14 +7579,11 @@ d2l_maybeNaN:
mov r0, #0
mov r1, #0
bx lr @ return 0 for NaN
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -11950,7 +11940,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: arm/footer.S */
@@ -12053,11 +12042,10 @@ MterpException:
bl MterpHandleException @ (self, shadow_frame)
cmp r0, #0
beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
ldr r1, [rFP, #OFF_FP_DEX_PC]
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, #CODEITEM_INSNS_OFFSET
- add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cmp r0, #0
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 80a7f1244a..82edab465e 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -123,7 +123,7 @@ codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
@@ -338,6 +338,23 @@ codes.
.cfi_adjust_cfa_offset -(\frame_adjustment)
.endm
+/*
+ * cfi support macros.
+ */
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
/* File: arm64/entry.S */
/*
* Copyright (C) 2016 The Android Open Source Project
@@ -366,12 +383,7 @@ codes.
* x3 JValue* result_register
*
*/
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-
-ExecuteMterpImpl:
- .cfi_startproc
+ENTRY ExecuteMterpImpl
SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
SAVE_TWO_REGS xIBASE, xREFS, 16
SAVE_TWO_REGS xSELF, xINST, 32
@@ -382,8 +394,8 @@ ExecuteMterpImpl:
/* Remember the return register */
str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
- /* Remember the code_item */
- str x1, [x2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+ /* Remember the dex instruction pointer */
+ str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
/* set up "named" registers */
mov xSELF, x0
@@ -391,8 +403,7 @@ ExecuteMterpImpl:
add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
- add xPC, xPC, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
@@ -413,7 +424,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -7060,7 +7070,6 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7070,12 +7079,9 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
@@ -7175,11 +7181,10 @@ MterpException:
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpHandleException // (self, shadow_frame)
cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_CODE_ITEM]
+ ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
ldr w1, [xFP, #OFF_FP_DEX_PC]
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, #CODEITEM_INSNS_OFFSET
- add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
+ add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cbnz w0, MterpFallback
@@ -7387,13 +7392,9 @@ MterpProfileActive:
RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
ret
- .cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -11750,6 +11751,11 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
+/* File: arm64/close_cfi.S */
+// Close out the cfi info. We're treating mterp as a single function.
+
+END ExecuteMterpImpl
+
+
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 74fee39a9e..8cc1b19128 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -223,7 +223,7 @@
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
#define MTERP_PROFILE_BRANCHES 1
@@ -245,9 +245,8 @@
sw rPC, OFF_FP_DEX_PC_PTR(rFP)
#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_CODE_ITEM(rFP); \
+ lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
- addu tmp, CODEITEM_INSNS_OFFSET; \
subu tmp, rPC, tmp; \
sra tmp, tmp, 1; \
sw tmp, OFF_FP_DEX_PC(rFP)
@@ -759,7 +758,7 @@
/*
* On entry:
* a0 Thread* self
- * a1 code_item
+ * a1 dex_instructions
* a2 ShadowFrame
* a3 JValue* result_register
*
@@ -777,8 +776,8 @@ ExecuteMterpImpl:
/* Remember the return register */
sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
- /* Remember the code_item */
- sw a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+ /* Remember the dex instruction pointer */
+ sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
@@ -786,8 +785,7 @@ ExecuteMterpImpl:
addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
- addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
- EAS1(rPC, rPC, a0) # Create direct pointer to 1st dex opcode
+ EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
EXPORT_PC()
@@ -809,7 +807,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -7871,7 +7868,6 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7881,7 +7877,6 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
@@ -7941,14 +7936,11 @@ artMterpAsmSisterStart:
.Lop_ushr_long_2addr_finish:
SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -12561,7 +12553,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: mips/footer.S */
@@ -12662,12 +12653,10 @@ MterpException:
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpHandleException) # (self, shadow_frame)
beqz v0, MterpExceptionReturn # no local catch, back to caller.
- lw a0, OFF_FP_CODE_ITEM(rFP)
+ lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
lw a1, OFF_FP_DEX_PC(rFP)
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
- addu rPC, a0, CODEITEM_INSNS_OFFSET
- sll a1, a1, 1
- addu rPC, rPC, a1 # generate new dex_pc_ptr
+ EAS1(rPC, a0, a1) # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
JAL(MterpShouldSwitchInterpreters)
bnez v0, MterpFallback
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 27fa318ea8..139ee25904 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -121,7 +121,7 @@ The following registers have fixed assignments:
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
#define MTERP_PROFILE_BRANCHES 1
@@ -361,7 +361,7 @@ The following registers have fixed assignments:
/*
* On entry:
* a0 Thread* self
- * a1 code_item
+ * a1 dex_instructions
* a2 ShadowFrame
* a3 JValue* result_register
*
@@ -397,17 +397,16 @@ ExecuteMterpImpl:
/* Remember the return register */
sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
- /* Remember the code_item */
- sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
+ /* Remember the dex instruction pointer */
+ sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
dlsa rREFS, v0, rFP, 2
- daddu rPC, a1, CODEITEM_INSNS_OFFSET
lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
- dlsa rPC, v0, rPC, 1
+ dlsa rPC, v0, a1, 1
EXPORT_PC
/* Starting ibase */
@@ -429,7 +428,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -7297,7 +7295,6 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7307,18 +7304,14 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -12187,7 +12180,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: mips64/footer.S */
@@ -12248,11 +12240,10 @@ MterpException:
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpHandleException # (self, shadow_frame)
beqzc v0, MterpExceptionReturn # no local catch, back to caller.
- ld a0, OFF_FP_CODE_ITEM(rFP)
+ ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
lwu a1, OFF_FP_DEX_PC(rFP)
REFRESH_IBASE
- daddu rPC, a0, CODEITEM_INSNS_OFFSET
- dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr
+ dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
jal MterpShouldSwitchInterpreters
bnezc v0, MterpFallback
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 984ecfa4c1..cbab61ebf6 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -135,7 +135,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
@@ -371,15 +371,14 @@ SYMBOL(ExecuteMterpImpl):
/* Remember the code_item */
movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_CODE_ITEM_OFFSET(%edx)
+ movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
leal (rFP, %eax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea CODEITEM_INSNS_OFFSET(%ecx), rPC
- lea (rPC, %eax, 2), rPC
+ lea (%ecx, %eax, 2), rPC
EXPORT_PC
/* Set up for backwards branches & osr profiling */
@@ -401,7 +400,6 @@ SYMBOL(ExecuteMterpImpl):
.global SYMBOL(artMterpAsmInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -6465,7 +6463,6 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
- SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6475,18 +6472,14 @@ SYMBOL(artMterpAsmInstructionEnd):
* ===========================================================================
*/
.global SYMBOL(artMterpAsmSisterStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
.text
.balign 4
SYMBOL(artMterpAsmSisterStart):
-
- SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
.global SYMBOL(artMterpAsmAltInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -12635,7 +12628,6 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
jmp .L_op_nop+(255*128)
.balign 128
- SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
/* File: x86/footer.S */
@@ -12756,10 +12748,9 @@ MterpException:
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
- movl OFF_FP_CODE_ITEM(rFP), %eax
+ movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
movl OFF_FP_DEX_PC(rFP), %ecx
- lea CODEITEM_INSNS_OFFSET(%eax), rPC
- lea (rPC, %ecx, 2), rPC
+ lea (%eax, %ecx, 2), rPC
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 84f8b52e4a..83c3e4fb91 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -131,7 +131,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
@@ -354,15 +354,14 @@ SYMBOL(ExecuteMterpImpl):
movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
/* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+ movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
leaq (rFP, %rax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
- leaq (rPC, %rax, 2), rPC
+ leaq (IN_ARG1, %rax, 2), rPC
EXPORT_PC
/* Starting ibase */
@@ -383,7 +382,6 @@ SYMBOL(ExecuteMterpImpl):
.global SYMBOL(artMterpAsmInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -6212,7 +6210,6 @@ movswl %ax, %eax
.balign 128
- SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6222,18 +6219,14 @@ SYMBOL(artMterpAsmInstructionEnd):
* ===========================================================================
*/
.global SYMBOL(artMterpAsmSisterStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
.text
.balign 4
SYMBOL(artMterpAsmSisterStart):
-
- SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
.global SYMBOL(artMterpAsmAltInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -11870,7 +11863,6 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
jmp .L_op_nop+(255*128)
.balign 128
- SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
/* File: x86_64/footer.S */
@@ -11974,10 +11966,9 @@ MterpException:
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
- movq OFF_FP_CODE_ITEM(rFP), %rax
+ movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
mov OFF_FP_DEX_PC(rFP), %ecx
- leaq CODEITEM_INSNS_OFFSET(%rax), rPC
- leaq (rPC, %rcx, 2), rPC
+ leaq (%rax, %rcx, 2), rPC
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index 34adf53b99..055e834fed 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -53,15 +53,14 @@ SYMBOL(ExecuteMterpImpl):
/* Remember the code_item */
movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_CODE_ITEM_OFFSET(%edx)
+ movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
leal (rFP, %eax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea CODEITEM_INSNS_OFFSET(%ecx), rPC
- lea (rPC, %eax, 2), rPC
+ lea (%ecx, %eax, 2), rPC
EXPORT_PC
/* Set up for backwards branches & osr profiling */
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index 088cb127dc..0b08cf98a3 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -115,10 +115,9 @@ MterpException:
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
- movl OFF_FP_CODE_ITEM(rFP), %eax
+ movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
movl OFF_FP_DEX_PC(rFP), %ecx
- lea CODEITEM_INSNS_OFFSET(%eax), rPC
- lea (rPC, %ecx, 2), rPC
+ lea (%eax, %ecx, 2), rPC
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 3a2dcb7188..370012f324 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -128,7 +128,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
index 0f969eb79f..83b845b702 100644
--- a/runtime/interpreter/mterp/x86_64/entry.S
+++ b/runtime/interpreter/mterp/x86_64/entry.S
@@ -50,15 +50,14 @@ SYMBOL(ExecuteMterpImpl):
movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
/* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
+ movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
leaq (rFP, %rax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
- leaq (rPC, %rax, 2), rPC
+ leaq (IN_ARG1, %rax, 2), rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
index ac6cd19f4e..3cc75321cf 100644
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -98,10 +98,9 @@ MterpException:
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
- movq OFF_FP_CODE_ITEM(rFP), %rax
+ movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
mov OFF_FP_DEX_PC(rFP), %ecx
- leaq CODEITEM_INSNS_OFFSET(%rax), rPC
- leaq (rPC, %rcx, 2), rPC
+ leaq (%rax, %rcx, 2), rPC
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
index f229e84eb8..9d21f3f1a1 100644
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -124,7 +124,7 @@ unspecified registers or condition codes.
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 80fdadb0a7..88275cc6d4 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -92,7 +92,7 @@ class ShadowFrame {
}
uint32_t GetDexPC() const {
- return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
+ return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - dex_instructions_;
}
int16_t GetCachedHotnessCountdown() const {
@@ -146,12 +146,8 @@ class ShadowFrame {
return &vregs_[i + NumberOfVRegs()];
}
- void SetCodeItem(const DexFile::CodeItem* code_item) {
- code_item_ = code_item;
- }
-
- const DexFile::CodeItem* GetCodeItem() const {
- return code_item_;
+ const uint16_t* GetDexInstructions() const {
+ return dex_instructions_;
}
float GetVRegFloat(size_t i) const {
@@ -324,8 +320,8 @@ class ShadowFrame {
return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
}
- static size_t CodeItemOffset() {
- return OFFSETOF_MEMBER(ShadowFrame, code_item_);
+ static size_t DexInstructionsOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
}
static size_t CachedHotnessCountdownOffset() {
@@ -372,7 +368,7 @@ class ShadowFrame {
method_(method),
result_register_(nullptr),
dex_pc_ptr_(nullptr),
- code_item_(nullptr),
+ dex_instructions_(nullptr),
number_of_vregs_(num_vregs),
dex_pc_(dex_pc),
cached_hotness_countdown_(0),
@@ -403,7 +399,8 @@ class ShadowFrame {
ArtMethod* method_;
JValue* result_register_;
const uint16_t* dex_pc_ptr_;
- const DexFile::CodeItem* code_item_;
+ // Dex instruction base of the code item.
+ const uint16_t* dex_instructions_;
LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
const uint32_t number_of_vregs_;
uint32_t dex_pc_;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 74a7a66046..31e7986770 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -47,7 +47,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "nth_caller_visitor.h"
#include "reflection.h"
#include "thread-inl.h"
@@ -1636,6 +1636,18 @@ void UnstartedRuntime::UnstartedSystemIdentityHashCode(
result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
}
+// Checks whether the runtime is 64-bit. This is needed for the clinit of
+// java.lang.invoke.VarHandle. The clinit determines the set of
+// available VarHandle accessors and these differ based on machine
+// word size.
+void UnstartedRuntime::UnstartedJNIVMRuntimeIs64Bit(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ jboolean is64bit = (pointer_size == PointerSize::k64) ? JNI_TRUE : JNI_FALSE;
+ result->SetZ(is64bit);
+}
+
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index e7047c7372..c029e07432 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -80,6 +80,7 @@
// Methods that are native.
#define UNSTARTED_RUNTIME_JNI_LIST(V) \
+ V(VMRuntimeIs64Bit, "boolean dalvik.system.VMRuntime.is64Bit()") \
V(VMRuntimeNewUnpaddedArray, "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)") \
V(VMStackGetCallingClassLoader, "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()") \
V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index fb378251ad..9db5f88dab 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -393,7 +393,6 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
// create instruction data for invoke-direct {v0, v1} of method with fake index
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
- DexInstructionIterator inst(inst_data);
JValue result;
ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
@@ -403,7 +402,12 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
shadow_frame->SetVRegReference(0, reference_empty_string);
shadow_frame->SetVRegReference(1, string_arg);
- interpreter::DoCall<false, false>(method, self, *shadow_frame, &*inst, inst_data[0], &result);
+ interpreter::DoCall<false, false>(method,
+ self,
+ *shadow_frame,
+ Instruction::At(inst_data),
+ inst_data[0],
+ &result);
mirror::String* string_result = reinterpret_cast<mirror::String*>(result.GetL());
EXPECT_EQ(string_arg->GetLength(), string_result->GetLength());
@@ -1027,12 +1031,16 @@ TEST_F(UnstartedRuntimeTest, FloatConversion) {
// create instruction data for invoke-direct {v0, v1} of method with fake index
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
- DexInstructionIterator inst(inst_data);
JValue result;
ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
shadow_frame->SetVRegDouble(0, 1.23);
- interpreter::DoCall<false, false>(method, self, *shadow_frame, &*inst, inst_data[0], &result);
+ interpreter::DoCall<false, false>(method,
+ self,
+ *shadow_frame,
+ Instruction::At(inst_data),
+ inst_data[0],
+ &result);
ObjPtr<mirror::String> string_result = reinterpret_cast<mirror::String*>(result.GetL());
ASSERT_TRUE(string_result != nullptr);
@@ -1187,12 +1195,11 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
// create instruction data for invoke-direct {v0} of method with fake index
uint16_t inst_data[3] = { 0x1070, 0x0000, 0x0010 };
- DexInstructionIterator inst(inst_data);
interpreter::DoCall<false, false>(boot_cp_init,
self,
*shadow_frame,
- &*inst,
+ Instruction::At(inst_data),
inst_data[0],
&result);
CHECK(!self->IsExceptionPending());
diff --git a/compiler/intrinsics_enum.h b/runtime/intrinsics_enum.h
index 55281812e4..d46d0cc00f 100644
--- a/compiler/intrinsics_enum.h
+++ b/runtime/intrinsics_enum.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_INTRINSICS_ENUM_H_
-#define ART_COMPILER_INTRINSICS_ENUM_H_
+#ifndef ART_RUNTIME_INTRINSICS_ENUM_H_
+#define ART_RUNTIME_INTRINSICS_ENUM_H_
namespace art {
@@ -32,4 +32,4 @@ std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic);
} // namespace art
-#endif // ART_COMPILER_INTRINSICS_ENUM_H_
+#endif // ART_RUNTIME_INTRINSICS_ENUM_H_
diff --git a/compiler/intrinsics_list.h b/runtime/intrinsics_list.h
index bfefead394..d007728750 100644
--- a/compiler/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -14,23 +14,76 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_INTRINSICS_LIST_H_
-#define ART_COMPILER_INTRINSICS_LIST_H_
-
-// All intrinsics supported by ART. Format is name, then whether it is expected
-// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an
-// environment, may have side effects, or may throw exceptions.
+#ifndef ART_RUNTIME_INTRINSICS_LIST_H_
+#define ART_RUNTIME_INTRINSICS_LIST_H_
+// This file defines the set of intrinsics that are supported by ART
+// in the compiler and runtime. Neither compiler nor runtime has
+// intrinsics for all methods here.
+//
+// The entries in the INTRINSICS_LIST below have the following format:
+//
+// 1. name
+// 2. invocation-type (art::InvokeType value).
+// 3. needs-environment (art::IntrinsicNeedsEnvironmentOrCache value)
+// 4. side-effects (art::IntrinsicSideEffects value)
+// 5. exception-info (art::IntrinsicExceptions value)
+// 6. declaring class descriptor
+// 7. method name
+// 8. method descriptor
+//
+// The needs-environment, side-effects and exception-info are compiler
+// related properties (compiler/optimizing/nodes.h) that should not be
+// used outside of the compiler.
+//
// Note: adding a new intrinsic requires an art image version change,
// as the modifiers flag for some ArtMethods will need to be changed.
+//
+// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an
+// OOME. The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to
+// GVN Integer.valueOf (kNoSideEffects), and it is also OK to remove it if
+// it's unused.
+//
+// Note: Thread.interrupted is marked with kAllSideEffects due to the lack
+// of finer grain side effects representation.
-// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME.
-// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
-// (kNoSideEffects), and it is also OK to remove it if it's unused.
-
-// Note: Thread.interrupted is marked with kAllSideEffects due to the lack of finer grain
-// side effects representation.
+// Intrinsics for methods with signature polymorphic behaviours.
+#define SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V) \
+ V(MethodHandleInvokeExact, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/MethodHandle;", "invokeExact", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(MethodHandleInvoke, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/MethodHandle;", "invoke", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchange, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchange", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchangeAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchangeAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndExchangeRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndExchangeRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleCompareAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "compareAndSet", "([Ljava/lang/Object;)Z") \
+ V(VarHandleGet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "get", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAdd, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAdd", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAddAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAddAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndAddRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndAddRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAnd, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAnd", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAndAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAndAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseAndRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseAndRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOr, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOr", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOrAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOrAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseOrRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseOrRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXor, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXor", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXorAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXorAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndBitwiseXorRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndBitwiseXorRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSet", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSetAcquire", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetAndSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getAndSetRelease", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetOpaque, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getOpaque", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleGetVolatile, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "getVolatile", "([Ljava/lang/Object;)Ljava/lang/Object;") \
+ V(VarHandleSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "set", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetOpaque, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setOpaque", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setRelease", "([Ljava/lang/Object;)V") \
+ V(VarHandleSetVolatile, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "setVolatile", "([Ljava/lang/Object;)V") \
+ V(VarHandleWeakCompareAndSet, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSet", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetAcquire, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetAcquire", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetPlain, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetPlain", "([Ljava/lang/Object;)Z") \
+ V(VarHandleWeakCompareAndSetRelease, kPolymorphic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/invoke/VarHandle;", "weakCompareAndSetRelease", "([Ljava/lang/Object;)Z")
+// The complete list of intrinsics.
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -164,6 +217,7 @@
V(VarHandleReleaseFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "releaseFence", "()V") \
V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
+ SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
-#endif // ART_COMPILER_INTRINSICS_LIST_H_
-#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
+#endif // ART_RUNTIME_INTRINSICS_LIST_H_
+#undef ART_RUNTIME_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/runtime/invoke_type.h b/runtime/invoke_type.h
index a003f7fe9e..2b877e6f51 100644
--- a/runtime/invoke_type.h
+++ b/runtime/invoke_type.h
@@ -22,12 +22,13 @@
namespace art {
enum InvokeType : uint32_t {
- kStatic, // <<static>>
- kDirect, // <<direct>>
- kVirtual, // <<virtual>>
- kSuper, // <<super>>
- kInterface, // <<interface>>
- kMaxInvokeType = kInterface
+ kStatic, // <<static>>
+ kDirect, // <<direct>>
+ kVirtual, // <<virtual>>
+ kSuper, // <<super>>
+ kInterface, // <<interface>>
+ kPolymorphic, // <<polymorphic>>
+ kMaxInvokeType = kPolymorphic
};
std::ostream& operator<<(std::ostream& os, const InvokeType& rhs);
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 73746e18ef..f8b82ed313 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -36,8 +36,8 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "nativebridge/native_bridge.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nativeloader/native_loader.h"
#include "object_callbacks.h"
#include "parsed_options.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 97a3b717e2..72b5a942fe 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -272,9 +272,12 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
+ RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Don't compile the method if it has breakpoints.
- if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
- VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to breakpoint";
+ if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
+ VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
+ << " due to not being safe to jit according to runtime-callbacks. For example, there"
+ << " could be breakpoints in this method.";
return false;
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 47615f56fe..e1807525ea 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1223,8 +1223,8 @@ bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
}
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
- static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
- if (kRuntimeISA == kArm) {
+ static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == InstructionSet::kArm) {
// On Thumb-2, the pc is offset by one.
--pc;
}
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 19501de81b..805b9c185a 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -34,6 +34,7 @@
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/mutex.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index ad013244c3..1344ca05b4 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -45,9 +45,7 @@ bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocatio
std::vector<uint32_t> entries;
- IterationRange<DexInstructionIterator> instructions = method->GetCodeItem()->Instructions();
- for (auto inst = instructions.begin(); inst != instructions.end(); ++inst) {
- const uint32_t dex_pc = inst.GetDexPC(instructions.begin());
+ for (const DexInstructionPcPair& inst : method->GetCodeItem()->Instructions()) {
switch (inst->Opcode()) {
case Instruction::INVOKE_VIRTUAL:
case Instruction::INVOKE_VIRTUAL_RANGE:
@@ -55,7 +53,7 @@ bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocatio
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE:
- entries.push_back(dex_pc);
+ entries.push_back(inst.DexPc());
break;
default:
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d74cec325a..5164c85b60 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -49,7 +49,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 3f00450319..1ecfe7cb76 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -24,7 +24,7 @@
#include "java_vm_ext.h"
#include "jni_env_ext.h"
#include "mirror/string-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 743604cc47..7f68d2faa0 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -34,6 +34,7 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "globals.h"
#include "utils.h"
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 65f39e4468..5a5d5713a8 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -355,15 +355,6 @@ inline bool ConvertAndCopyArgumentsFromCallerFrame(
num_method_params);
}
-inline bool IsMethodHandleInvokeExact(const ArtMethod* const method) {
- if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) {
- return true;
- } else {
- DCHECK_EQ(method, jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke));
- return false;
- }
-}
-
inline bool IsInvoke(const mirror::MethodHandle::Kind handle_kind) {
return handle_kind <= mirror::MethodHandle::Kind::kLastInvokeKind;
}
@@ -416,15 +407,14 @@ static inline bool IsCallerTransformer(Handle<mirror::MethodType> callsite_type)
}
template <bool is_range>
-static inline bool DoCallPolymorphic(ArtMethod* called_method,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> target_type,
- Thread* self,
- ShadowFrame& shadow_frame,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> target_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
@@ -552,15 +542,15 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
}
template <bool is_range>
-static inline bool DoCallTransform(ArtMethod* called_method,
- Handle<mirror::MethodType> callsite_type,
- Handle<mirror::MethodType> callee_type,
- Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> receiver,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeTransform(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> receiver,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
// This can be fixed to two, because the method we're calling here
// (MethodHandle.transformInternal) doesn't have any locals and the signature
@@ -753,34 +743,34 @@ bool DoInvokePolymorphicMethod(Thread* self,
Handle<mirror::MethodType> callee_type =
(handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
: handle_type;
- return DoCallTransform<is_range>(called_method,
- callsite_type,
- callee_type,
- self,
- shadow_frame,
- method_handle /* receiver */,
- args,
- first_arg,
- result);
+ return MethodHandleInvokeTransform<is_range>(called_method,
+ callsite_type,
+ callee_type,
+ self,
+ shadow_frame,
+ method_handle /* receiver */,
+ args,
+ first_arg,
+ result);
} else {
- return DoCallPolymorphic<is_range>(called_method,
- callsite_type,
- handle_type,
- self,
- shadow_frame,
- args,
- first_arg,
- result);
+ return MethodHandleInvokeMethod<is_range>(called_method,
+ callsite_type,
+ handle_type,
+ self,
+ shadow_frame,
+ args,
+ first_arg,
+ result);
}
}
// Helper for getters in invoke-polymorphic.
-inline static void DoFieldGetForInvokePolymorphic(Thread* self,
- const ShadowFrame& shadow_frame,
- ObjPtr<mirror::Object>& obj,
- ArtField* field,
- Primitive::Type field_type,
- JValue* result)
+inline static void MethodHandleFieldGet(Thread* self,
+ const ShadowFrame& shadow_frame,
+ ObjPtr<mirror::Object>& obj,
+ ArtField* field,
+ Primitive::Type field_type,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -817,12 +807,12 @@ inline static void DoFieldGetForInvokePolymorphic(Thread* self,
}
// Helper for setters in invoke-polymorphic.
-inline bool DoFieldPutForInvokePolymorphic(Thread* self,
- ShadowFrame& shadow_frame,
- ObjPtr<mirror::Object>& obj,
- ArtField* field,
- Primitive::Type field_type,
- JValue& value)
+inline bool MethodHandleFieldPut(Thread* self,
+ ShadowFrame& shadow_frame,
+ ObjPtr<mirror::Object>& obj,
+ ArtField* field,
+ Primitive::Type field_type,
+ JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!Runtime::Current()->IsActiveTransaction());
static const bool kTransaction = false; // Not in a transaction.
@@ -895,14 +885,13 @@ static JValue GetValueFromShadowFrame(const ShadowFrame& shadow_frame,
}
template <bool is_range, bool do_conversions>
-bool DoInvokePolymorphicFieldAccess(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+bool MethodHandleFieldAccess(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
@@ -913,7 +902,7 @@ bool DoInvokePolymorphicFieldAccess(Thread* self,
case mirror::MethodHandle::kInstanceGet: {
size_t obj_reg = is_range ? first_arg : args[0];
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
- DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+ MethodHandleFieldGet(self, shadow_frame, obj, field, field_type, result);
if (do_conversions && !ConvertReturnValue(callsite_type, handle_type, result)) {
DCHECK(self->IsExceptionPending());
return false;
@@ -926,7 +915,7 @@ bool DoInvokePolymorphicFieldAccess(Thread* self,
DCHECK(self->IsExceptionPending());
return false;
}
- DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+ MethodHandleFieldGet(self, shadow_frame, obj, field, field_type, result);
if (do_conversions && !ConvertReturnValue(callsite_type, handle_type, result)) {
DCHECK(self->IsExceptionPending());
return false;
@@ -951,7 +940,7 @@ bool DoInvokePolymorphicFieldAccess(Thread* self,
return false;
}
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
- return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+ return MethodHandleFieldPut(self, shadow_frame, obj, field, field_type, value);
}
case mirror::MethodHandle::kStaticPut: {
ObjPtr<mirror::Object> obj = GetAndInitializeDeclaringClass(self, field);
@@ -974,7 +963,7 @@ bool DoInvokePolymorphicFieldAccess(Thread* self,
DCHECK(self->IsExceptionPending());
return false;
}
- return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+ return MethodHandleFieldPut(self, shadow_frame, obj, field, field_type, value);
}
default:
LOG(FATAL) << "Unreachable: " << handle_kind;
@@ -983,26 +972,24 @@ bool DoInvokePolymorphicFieldAccess(Thread* self,
}
template <bool is_range>
-static inline bool DoInvokePolymorphicNonExact(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeInternal(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
- ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
- CHECK(handle_type != nullptr);
-
if (IsFieldAccess(handle_kind)) {
+ ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
DCHECK(!callsite_type->IsExactMatch(handle_type.Ptr()));
if (!callsite_type->IsConvertible(handle_type.Ptr())) {
ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
return false;
}
const bool do_convert = true;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
+ return MethodHandleFieldAccess<is_range, do_convert>(
self,
shadow_frame,
method_handle,
@@ -1011,7 +998,6 @@ static inline bool DoInvokePolymorphicNonExact(Thread* self,
first_arg,
result);
}
-
return DoInvokePolymorphicMethod<is_range>(self,
shadow_frame,
method_handle,
@@ -1022,27 +1008,32 @@ static inline bool DoInvokePolymorphicNonExact(Thread* self,
}
template <bool is_range>
-bool DoInvokePolymorphicExact(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+static inline bool MethodHandleInvokeExactInternal(
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
- const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
Handle<mirror::MethodType> method_handle_type(hs.NewHandle(method_handle->GetMethodType()));
+ if (!callsite_type->IsExactMatch(method_handle_type.Get())) {
+ ThrowWrongMethodTypeException(method_handle_type.Get(), callsite_type.Get());
+ return false;
+ }
+
+ const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
if (IsFieldAccess(handle_kind)) {
const bool do_convert = false;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
+ return MethodHandleFieldAccess<is_range, do_convert>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
// Slow-path check.
@@ -1120,77 +1111,77 @@ bool DoInvokePolymorphicExact(Thread* self,
} // namespace
template <bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ArtMethod* invoke_method,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+inline bool MethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::MethodType> method_handle_type = method_handle->GetMethodType();
- if (IsMethodHandleInvokeExact(invoke_method)) {
- // We need to check the nominal type of the handle in addition to the
- // real type. The "nominal" type is present when MethodHandle.asType is
- // called any handle, and results in the declared type of the handle
- // changing.
- ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
- if (UNLIKELY(nominal_type != nullptr)) {
- if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
- ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
- return false;
- }
-
- if (LIKELY(!nominal_type->IsExactMatch(method_handle_type.Ptr()))) {
- // Different nominal type means we have to treat as non-exact.
- return DoInvokePolymorphicNonExact<is_range>(self,
+ if (UNLIKELY(callsite_type->IsExactMatch(method_handle->GetMethodType()))) {
+ // A non-exact invoke that can be invoked exactly.
+ return MethodHandleInvokeExactInternal<is_range>(self,
shadow_frame,
method_handle,
callsite_type,
args,
first_arg,
result);
- }
- }
-
- if (!callsite_type->IsExactMatch(method_handle_type.Ptr())) {
- ThrowWrongMethodTypeException(method_handle_type.Ptr(), callsite_type.Get());
- return false;
- }
- return DoInvokePolymorphicExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
} else {
- if (UNLIKELY(callsite_type->IsExactMatch(method_handle_type.Ptr()))) {
- // A non-exact invoke that can be invoked exactly.
- return DoInvokePolymorphicExact<is_range>(self,
+ return MethodHandleInvokeInternal<is_range>(self,
shadow_frame,
method_handle,
callsite_type,
args,
first_arg,
result);
+ }
+}
+
+template <bool is_range>
+bool MethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We need to check the nominal type of the handle in addition to the
+ // real type. The "nominal" type is present when MethodHandle.asType is
+ // called any handle, and results in the declared type of the handle
+ // changing.
+ ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
+ if (UNLIKELY(nominal_type != nullptr)) {
+ if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
+ ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
+ return false;
+ }
+ if (LIKELY(!nominal_type->IsExactMatch(method_handle->GetMethodType()))) {
+ // Different nominal type means we have to treat as non-exact.
+ return MethodHandleInvokeInternal<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
- return DoInvokePolymorphicNonExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
}
+ return MethodHandleInvokeExactInternal<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
-#define EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(_is_range) \
+#define EXPLICIT_DO_METHOD_HANDLE_METHOD(_name, _is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokePolymorphic<_is_range>( \
+ bool MethodHandle##_name<_is_range>( \
Thread* self, \
- ArtMethod* invoke_method, \
ShadowFrame& shadow_frame, \
Handle<mirror::MethodHandle> method_handle, \
Handle<mirror::MethodType> callsite_type, \
@@ -1198,8 +1189,10 @@ bool DoInvokePolymorphic(Thread* self,
uint32_t first_arg, \
JValue* result)
-EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
-EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(false);
-#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
+EXPLICIT_DO_METHOD_HANDLE_METHOD(Invoke, true);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(Invoke, false);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(InvokeExact, true);
+EXPLICIT_DO_METHOD_HANDLE_METHOD(InvokeExact, false);
+#undef EXPLICIT_DO_METHOD_HANDLE_METHOD
} // namespace art
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 55680f09e7..8641918f1b 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -202,14 +202,23 @@ class ShadowFrameSetter {
};
template <bool is_range>
-bool DoInvokePolymorphic(Thread* self,
- ArtMethod* invoke_method,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+bool MethodHandleInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <bool is_range>
+bool MethodHandleInvokeExact(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/mirror/emulated_stack_frame.cc b/runtime/mirror/emulated_stack_frame.cc
index a6129ccc5f..f82bfbfaef 100644
--- a/runtime/mirror/emulated_stack_frame.cc
+++ b/runtime/mirror/emulated_stack_frame.cc
@@ -289,7 +289,7 @@ void EmulatedStackFrame::VisitRoots(RootVisitor* visitor) {
static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-// Explicit DoInvokePolymorphic template function declarations.
+// Explicit CreateFromShadowFrameAndArgs template function declarations.
#define EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
mirror::EmulatedStackFrame* EmulatedStackFrame::CreateFromShadowFrameAndArgs<_is_range>( \
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index ad48202514..bcd2db4dbd 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -31,7 +31,7 @@ template <PointerSize kPointerSize, bool kTransactionActive>
inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, bool force_resolve) {
StackHandleScope<2> hs(self);
// Try to resolve type before allocating since this is a thread suspension point.
- Handle<mirror::Class> type = hs.NewHandle(field->GetType<true>());
+ Handle<mirror::Class> type = hs.NewHandle(field->ResolveType());
if (type == nullptr) {
if (force_resolve) {
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 78ef339b8c..87cc620309 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -239,7 +239,8 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object>
if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
+ ObjPtr<mirror::Class> field_type =
+ kMovingCollector ? field.LookupType() : field.ResolveType();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
@@ -256,7 +257,8 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object>
if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
- ObjPtr<mirror::Class> field_type = field.GetType<!kMovingCollector>();
+ ObjPtr<mirror::Class> field_type =
+ kMovingCollector ? field.LookupType() : field.ResolveType();
if (field_type != nullptr) {
CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
new file mode 100644
index 0000000000..e7eac1a5ab
--- /dev/null
+++ b/runtime/mirror/var_handle.cc
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "var_handle.h"
+
+#include "class-inl.h"
+#include "class_linker.h"
+#include "gc_root-inl.h"
+#include "method_type.h"
+
+namespace art {
+namespace mirror {
+
+namespace {
+
+// Enumeration for describing the parameter and return types of an AccessMode.
+enum class AccessModeTemplate : uint32_t {
+ kGet, // T Op(C0..CN)
+ kSet, // void Op(C0..CN, T)
+ kCompareAndSet, // boolean Op(C0..CN, T, T)
+ kCompareAndExchange, // T Op(C0..CN, T, T)
+ kGetAndUpdate, // T Op(C0..CN, T)
+};
+
+// Look up the AccessModeTemplate for a given VarHandle
+// AccessMode. This simplifies finding the correct signature for a
+// VarHandle accessor method.
+AccessModeTemplate GetAccessModeTemplate(VarHandle::AccessMode access_mode) {
+ switch (access_mode) {
+ case VarHandle::AccessMode::kGet:
+ return AccessModeTemplate::kGet;
+ case VarHandle::AccessMode::kSet:
+ return AccessModeTemplate::kSet;
+ case VarHandle::AccessMode::kGetVolatile:
+ return AccessModeTemplate::kGet;
+ case VarHandle::AccessMode::kSetVolatile:
+ return AccessModeTemplate::kSet;
+ case VarHandle::AccessMode::kGetAcquire:
+ return AccessModeTemplate::kGet;
+ case VarHandle::AccessMode::kSetRelease:
+ return AccessModeTemplate::kSet;
+ case VarHandle::AccessMode::kGetOpaque:
+ return AccessModeTemplate::kGet;
+ case VarHandle::AccessMode::kSetOpaque:
+ return AccessModeTemplate::kSet;
+ case VarHandle::AccessMode::kCompareAndSet:
+ return AccessModeTemplate::kCompareAndSet;
+ case VarHandle::AccessMode::kCompareAndExchange:
+ return AccessModeTemplate::kCompareAndExchange;
+ case VarHandle::AccessMode::kCompareAndExchangeAcquire:
+ return AccessModeTemplate::kCompareAndExchange;
+ case VarHandle::AccessMode::kCompareAndExchangeRelease:
+ return AccessModeTemplate::kCompareAndExchange;
+ case VarHandle::AccessMode::kWeakCompareAndSetPlain:
+ return AccessModeTemplate::kCompareAndSet;
+ case VarHandle::AccessMode::kWeakCompareAndSet:
+ return AccessModeTemplate::kCompareAndSet;
+ case VarHandle::AccessMode::kWeakCompareAndSetAcquire:
+ return AccessModeTemplate::kCompareAndSet;
+ case VarHandle::AccessMode::kWeakCompareAndSetRelease:
+ return AccessModeTemplate::kCompareAndSet;
+ case VarHandle::AccessMode::kGetAndSet:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndSetAcquire:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndSetRelease:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndAdd:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndAddAcquire:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndAddRelease:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseOr:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseOrRelease:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseOrAcquire:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseAnd:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseAndRelease:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseAndAcquire:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseXor:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseXorRelease:
+ return AccessModeTemplate::kGetAndUpdate;
+ case VarHandle::AccessMode::kGetAndBitwiseXorAcquire:
+ return AccessModeTemplate::kGetAndUpdate;
+ }
+}
+
+// Returns the number of parameters associated with an
+// AccessModeTemplate and the supplied coordinate types.
+int32_t GetParameterCount(AccessModeTemplate access_mode_template,
+ ObjPtr<Class> coordinateType0,
+ ObjPtr<Class> coordinateType1) {
+ int32_t index = 0;
+ if (!coordinateType0.IsNull()) {
+ index++;
+ if (!coordinateType1.IsNull()) {
+ index++;
+ }
+ }
+
+ switch (access_mode_template) {
+ case AccessModeTemplate::kGet:
+ return index;
+ case AccessModeTemplate::kSet:
+ case AccessModeTemplate::kGetAndUpdate:
+ return index + 1;
+ case AccessModeTemplate::kCompareAndSet:
+ case AccessModeTemplate::kCompareAndExchange:
+ return index + 2;
+ }
+ UNREACHABLE();
+}
+
+// Writes the parameter types associated with the AccessModeTemplate
+// into an array. The parameter types are derived from the specified
+// variable type and coordinate types. Returns the number of
+// parameters written.
+int32_t BuildParameterArray(ObjPtr<Class> (&parameters)[VarHandle::kMaxAccessorParameters],
+ AccessModeTemplate access_mode_template,
+ ObjPtr<Class> varType,
+ ObjPtr<Class> coordinateType0,
+ ObjPtr<Class> coordinateType1)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(varType != nullptr);
+ int32_t index = 0;
+ if (!coordinateType0.IsNull()) {
+ parameters[index++] = coordinateType0;
+ if (!coordinateType1.IsNull()) {
+ parameters[index++] = coordinateType1;
+ }
+ } else {
+ DCHECK(coordinateType1.IsNull());
+ }
+
+ switch (access_mode_template) {
+ case AccessModeTemplate::kCompareAndExchange:
+ case AccessModeTemplate::kCompareAndSet:
+ parameters[index++] = varType;
+ parameters[index++] = varType;
+ return index;
+ case AccessModeTemplate::kGet:
+ return index;
+ case AccessModeTemplate::kGetAndUpdate:
+ case AccessModeTemplate::kSet:
+ parameters[index++] = varType;
+ return index;
+ }
+ return -1;
+}
+
+// Returns the return type associated with an AccessModeTemplate based
+// on the template and the variable type specified.
+Class* GetReturnType(AccessModeTemplate access_mode_template, ObjPtr<Class> varType)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(varType != nullptr);
+ switch (access_mode_template) {
+ case AccessModeTemplate::kCompareAndSet:
+ return Runtime::Current()->GetClassLinker()->FindPrimitiveClass('Z');
+ case AccessModeTemplate::kCompareAndExchange:
+ case AccessModeTemplate::kGet:
+ case AccessModeTemplate::kGetAndUpdate:
+ return varType.Ptr();
+ case AccessModeTemplate::kSet:
+ return Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V');
+ }
+ return nullptr;
+}
+
+ObjectArray<Class>* NewArrayOfClasses(Thread* self, int count)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> array_of_class = class_linker->FindArrayClass(self, &class_type);
+ return ObjectArray<Class>::Alloc(Thread::Current(), array_of_class, count);
+}
+
+} // namespace
+
+Class* VarHandle::GetVarType() {
+ return GetFieldObject<Class>(VarTypeOffset());
+}
+
+Class* VarHandle::GetCoordinateType0() {
+ return GetFieldObject<Class>(CoordinateType0Offset());
+}
+
+Class* VarHandle::GetCoordinateType1() {
+ return GetFieldObject<Class>(CoordinateType1Offset());
+}
+
+int32_t VarHandle::GetAccessModesBitMask() {
+ return GetField32(AccessModesBitMaskOffset());
+}
+
+bool VarHandle::IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+
+ AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+ // Check return types first.
+ ObjPtr<Class> var_type = GetVarType();
+ ObjPtr<Class> vh_rtype = GetReturnType(access_mode_template, var_type);
+ ObjPtr<Class> void_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V');
+ ObjPtr<Class> mt_rtype = method_type->GetRType();
+
+ // If the mt_rtype is void, the result of the operation will be discarded (okay).
+ if (mt_rtype != void_type && mt_rtype != vh_rtype) {
+ return false;
+ }
+
+ // Check the number of parameters matches.
+ ObjPtr<Class> vh_ptypes[VarHandle::kMaxAccessorParameters];
+ const int32_t vh_ptypes_count = BuildParameterArray(vh_ptypes,
+ access_mode_template,
+ var_type,
+ GetCoordinateType0(),
+ GetCoordinateType1());
+ if (vh_ptypes_count != method_type->GetPTypes()->GetLength()) {
+ return false;
+ }
+
+ // Check the parameter types match.
+ ObjPtr<ObjectArray<Class>> mt_ptypes = method_type->GetPTypes();
+ for (int32_t i = 0; i < vh_ptypes_count; ++i) {
+ if (mt_ptypes->Get(i) != vh_ptypes[i].Ptr()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self,
+ ObjPtr<VarHandle> var_handle,
+ AccessMode access_mode) {
+ // This is a static as the var_handle might be moved by the GC during it's execution.
+ AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+
+ StackHandleScope<3> hs(self);
+ Handle<VarHandle> vh = hs.NewHandle(var_handle);
+ Handle<Class> rtype = hs.NewHandle(GetReturnType(access_mode_template, vh->GetVarType()));
+ const int32_t ptypes_count =
+ GetParameterCount(access_mode_template, vh->GetCoordinateType0(), vh->GetCoordinateType1());
+ Handle<ObjectArray<Class>> ptypes = hs.NewHandle(NewArrayOfClasses(self, ptypes_count));
+ if (ptypes == nullptr) {
+ return nullptr;
+ }
+
+ ObjPtr<Class> ptypes_array[VarHandle::kMaxAccessorParameters];
+ BuildParameterArray(ptypes_array,
+ access_mode_template,
+ vh->GetVarType(),
+ vh->GetCoordinateType0(),
+ vh->GetCoordinateType1());
+ for (int32_t i = 0; i < ptypes_count; ++i) {
+ ptypes->Set(i, ptypes_array[i].Ptr());
+ }
+ return MethodType::Create(self, rtype, ptypes);
+}
+
+MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, AccessMode access_mode) {
+ return GetMethodTypeForAccessMode(self, this, access_mode);
+}
+
+void VarHandle::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void VarHandle::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void VarHandle::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+GcRoot<Class> VarHandle::static_class_;
+
+ArtField* FieldVarHandle::GetField() {
+ uintptr_t opaque_field = static_cast<uintptr_t>(GetField64(ArtFieldOffset()));
+ return reinterpret_cast<ArtField*>(opaque_field);
+}
+
+Class* FieldVarHandle::StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return static_class_.Read();
+}
+
+void FieldVarHandle::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void FieldVarHandle::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void FieldVarHandle::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+GcRoot<Class> FieldVarHandle::static_class_;
+
+Class* ArrayElementVarHandle::StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return static_class_.Read();
+}
+
+void ArrayElementVarHandle::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void ArrayElementVarHandle::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void ArrayElementVarHandle::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+GcRoot<Class> ArrayElementVarHandle::static_class_;
+
+bool ByteArrayViewVarHandle::GetNativeByteOrder() {
+ return GetFieldBoolean(NativeByteOrderOffset());
+}
+
+Class* ByteArrayViewVarHandle::StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return static_class_.Read();
+}
+
+void ByteArrayViewVarHandle::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void ByteArrayViewVarHandle::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void ByteArrayViewVarHandle::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+GcRoot<Class> ByteArrayViewVarHandle::static_class_;
+
+bool ByteBufferViewVarHandle::GetNativeByteOrder() {
+ return GetFieldBoolean(NativeByteOrderOffset());
+}
+
+Class* ByteBufferViewVarHandle::StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return static_class_.Read();
+}
+
+void ByteBufferViewVarHandle::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void ByteBufferViewVarHandle::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void ByteBufferViewVarHandle::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+GcRoot<Class> ByteBufferViewVarHandle::static_class_;
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
new file mode 100644
index 0000000000..a2a5d8c9ff
--- /dev/null
+++ b/runtime/mirror/var_handle.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_VAR_HANDLE_H_
+#define ART_RUNTIME_MIRROR_VAR_HANDLE_H_
+
+#include "gc_root.h"
+#include "handle.h"
+#include "object.h"
+
+namespace art {
+
+template<class T> class Handle;
+struct VarHandleOffsets;
+struct FieldVarHandleOffsets;
+struct ByteArrayViewVarHandleOffsets;
+struct ByteBufferViewVarHandleOffsets;
+
+namespace mirror {
+
+class MethodType;
+class VarHandleTest;
+
+// C++ mirror of java.lang.invoke.VarHandle
+class MANAGED VarHandle : public Object {
+ public:
+ // The maximum number of parameters a VarHandle accessor method can
+ // take. The Worst case is equivalent to a compare-and-swap
+ // operation on an array element which requires four parameters
+ // (array, index, old, new).
+ static constexpr int kMaxAccessorParameters = 4;
+
+ // Enumeration of the possible access modes. This mirrors the enum
+ // in java.lang.invoke.VarHandle.
+ enum class AccessMode : uint32_t {
+ kGet,
+ kSet,
+ kGetVolatile,
+ kSetVolatile,
+ kGetAcquire,
+ kSetRelease,
+ kGetOpaque,
+ kSetOpaque,
+ kCompareAndSet,
+ kCompareAndExchange,
+ kCompareAndExchangeAcquire,
+ kCompareAndExchangeRelease,
+ kWeakCompareAndSetPlain,
+ kWeakCompareAndSet,
+ kWeakCompareAndSetAcquire,
+ kWeakCompareAndSetRelease,
+ kGetAndSet,
+ kGetAndSetAcquire,
+ kGetAndSetRelease,
+ kGetAndAdd,
+ kGetAndAddAcquire,
+ kGetAndAddRelease,
+ kGetAndBitwiseOr,
+ kGetAndBitwiseOrRelease,
+ kGetAndBitwiseOrAcquire,
+ kGetAndBitwiseAnd,
+ kGetAndBitwiseAndRelease,
+ kGetAndBitwiseAndAcquire,
+ kGetAndBitwiseXor,
+ kGetAndBitwiseXorRelease,
+ kGetAndBitwiseXorAcquire,
+ };
+
+ // Returns true if the AccessMode specified is a supported operation.
+ bool IsAccessModeSupported(AccessMode accessMode) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (GetAccessModesBitMask() & (1u << static_cast<uint32_t>(accessMode))) != 0;
+ }
+
+ // Returns true if the MethodType specified is compatible with the
+ // method type associated with the specified AccessMode. The
+ // supplied MethodType is assumed to be from the point of invocation
+ // so it is valid for the supplied MethodType to have a void return
+ // value when the return value for the AccessMode is non-void. This
+ // corresponds to the result of the accessor being discarded.
+ bool IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Allocates and returns the MethodType associated with the
+ // AccessMode. No check is made for whether the AccessMode is a
+ // supported operation so the MethodType can be used when raising a
+ // WrongMethodTypeException exception.
+ MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ Class* GetVarType() REQUIRES_SHARED(Locks::mutator_lock_);
+ Class* GetCoordinateType0() REQUIRES_SHARED(Locks::mutator_lock_);
+ Class* GetCoordinateType1() REQUIRES_SHARED(Locks::mutator_lock_);
+ int32_t GetAccessModesBitMask() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static MethodType* GetMethodTypeForAccessMode(Thread* self,
+ ObjPtr<VarHandle> var_handle,
+ AccessMode access_mode)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static MemberOffset VarTypeOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(VarHandle, var_type_));
+ }
+
+ static MemberOffset CoordinateType0Offset() {
+ return MemberOffset(OFFSETOF_MEMBER(VarHandle, coordinate_type0_));
+ }
+
+ static MemberOffset CoordinateType1Offset() {
+ return MemberOffset(OFFSETOF_MEMBER(VarHandle, coordinate_type1_));
+ }
+
+ static MemberOffset AccessModesBitMaskOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(VarHandle, access_modes_bit_mask_));
+ }
+
+ HeapReference<mirror::Class> coordinate_type0_;
+ HeapReference<mirror::Class> coordinate_type1_;
+ HeapReference<mirror::Class> var_type_;
+ int32_t access_modes_bit_mask_;
+
+ // Root representing java.lang.invoke.VarHandle.class.
+ static GcRoot<mirror::Class> static_class_;
+
+ friend class VarHandleTest; // for testing purposes
+ friend struct art::VarHandleOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VarHandle);
+};
+
+// Represents a VarHandle to a static or instance field.
+// The corresponding managed class in libart is java.lang.invoke.FieldVarHandle.
+class MANAGED FieldVarHandle : public VarHandle {
+ public:
+ ArtField* GetField() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static MemberOffset ArtFieldOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(FieldVarHandle, art_field_));
+ }
+
+ // ArtField instance corresponding to variable for accessors.
+ int64_t art_field_;
+
+ // Root representing java.lang.invoke.FieldVarHandle.class.
+ static GcRoot<mirror::Class> static_class_;
+
+ friend class VarHandleTest; // for var_handle_test.
+ friend struct art::FieldVarHandleOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FieldVarHandle);
+};
+
+// Represents a VarHandle providing accessors to an array.
+// The corresponding managed class in libart is java.lang.invoke.ArrayElementVarHandle.
+class MANAGED ArrayElementVarHandle : public VarHandle {
+ public:
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ // Root representing java.lang.invoke.ArrayElementVarHandle.class.
+ static GcRoot<mirror::Class> static_class_;
+
+ friend class VarHandleTest;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayElementVarHandle);
+};
+
+// Represents a VarHandle providing accessors to a view of a ByteArray.
+// The corresponding managed class in libart is java.lang.invoke.ByteArrayViewVarHandle.
+class MANAGED ByteArrayViewVarHandle : public VarHandle {
+ public:
+ bool GetNativeByteOrder() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static MemberOffset NativeByteOrderOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ByteArrayViewVarHandle, native_byte_order_));
+ }
+
+ // Flag indicating that accessors should use native byte-ordering.
+ uint8_t native_byte_order_;
+
+ // Root representing java.lang.invoke.ByteArrayViewVarHandle.class.
+ static GcRoot<mirror::Class> static_class_;
+
+ friend class VarHandleTest; // for var_handle_test.
+ friend struct art::ByteArrayViewVarHandleOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArrayViewVarHandle);
+};
+
+// Represents a VarHandle providing accessors to a view of a ByteBuffer.
+// The corresponding managed class in libart is java.lang.invoke.ByteBufferViewVarHandle.
+class MANAGED ByteBufferViewVarHandle : public VarHandle {
+ public:
+ bool GetNativeByteOrder() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static ByteBufferViewVarHandle* Create(Thread* const self, bool native_byte_order)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static MemberOffset NativeByteOrderOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ByteBufferViewVarHandle, native_byte_order_));
+ }
+
+ // Flag indicating that accessors should use native byte-ordering.
+ uint8_t native_byte_order_;
+
+ // Root representing java.lang.invoke.ByteBufferViewVarHandle.class.
+ static GcRoot<mirror::Class> static_class_;
+
+ friend class VarHandleTest; // for var_handle_test.
+ friend struct art::ByteBufferViewVarHandleOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ByteBufferViewVarHandle);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_VAR_HANDLE_H_
diff --git a/runtime/mirror/var_handle_test.cc b/runtime/mirror/var_handle_test.cc
new file mode 100644
index 0000000000..159f80c4fd
--- /dev/null
+++ b/runtime/mirror/var_handle_test.cc
@@ -0,0 +1,991 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "var_handle.h"
+
+#include <string>
+#include <vector>
+
+#include "art_field-inl.h"
+#include "class-inl.h"
+#include "class_linker-inl.h"
+#include "class_loader.h"
+#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
+#include "jvalue-inl.h"
+#include "method_type.h"
+#include "object_array-inl.h"
+#include "reflection.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+namespace mirror {
+
+// Tests for mirror::VarHandle and its descendants.
+class VarHandleTest : public CommonRuntimeTest {
+ public:
+ static FieldVarHandle* CreateFieldVarHandle(Thread* const self,
+ ArtField* art_field,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<4> hs(self);
+ Handle<FieldVarHandle> fvh = hs.NewHandle(
+ ObjPtr<FieldVarHandle>::DownCast(FieldVarHandle::StaticClass()->AllocObject(self)));
+ Handle<Class> var_type = hs.NewHandle(art_field->ResolveType());
+
+ if (art_field->IsStatic()) {
+ InitializeVarHandle(fvh.Get(), var_type, access_modes_bit_mask);
+ } else {
+ Handle<Class> declaring_type = hs.NewHandle(art_field->GetDeclaringClass().Ptr());
+ InitializeVarHandle(fvh.Get(),
+ var_type,
+ declaring_type,
+ access_modes_bit_mask);
+ }
+ uintptr_t opaque_field = reinterpret_cast<uintptr_t>(art_field);
+ fvh->SetField64<false>(FieldVarHandle::ArtFieldOffset(), opaque_field);
+ return fvh.Get();
+ }
+
+ static ArrayElementVarHandle* CreateArrayElementVarHandle(Thread* const self,
+ Handle<Class> array_class,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<3> hs(self);
+ Handle<ArrayElementVarHandle> vh = hs.NewHandle(
+ ObjPtr<ArrayElementVarHandle>::DownCast(
+ ArrayElementVarHandle::StaticClass()->AllocObject(self)));
+
+ // Initialize super class fields
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<Class> var_type = hs.NewHandle(array_class->GetComponentType());
+ Handle<Class> index_type = hs.NewHandle(class_linker->FindPrimitiveClass('I'));
+ InitializeVarHandle(vh.Get(), var_type, array_class, index_type, access_modes_bit_mask);
+ return vh.Get();
+ }
+
+ static ByteArrayViewVarHandle* CreateByteArrayViewVarHandle(Thread* const self,
+ Handle<Class> view_array_class,
+ bool native_byte_order,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<4> hs(self);
+ Handle<ByteArrayViewVarHandle> bvh = hs.NewHandle(
+ ObjPtr<ByteArrayViewVarHandle>::DownCast(
+ ByteArrayViewVarHandle::StaticClass()->AllocObject(self)));
+
+ // Initialize super class fields
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<Class> var_type = hs.NewHandle(view_array_class->GetComponentType());
+ Handle<Class> index_type = hs.NewHandle(class_linker->FindPrimitiveClass('I'));
+ ObjPtr<mirror::Class> byte_class = class_linker->FindPrimitiveClass('B');
+ Handle<Class> byte_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &byte_class)));
+ InitializeVarHandle(bvh.Get(), var_type, byte_array_class, index_type, access_modes_bit_mask);
+ bvh->SetFieldBoolean<false>(ByteArrayViewVarHandle::NativeByteOrderOffset(), native_byte_order);
+ return bvh.Get();
+ }
+
+ static ByteBufferViewVarHandle* CreateByteBufferViewVarHandle(Thread* const self,
+ Handle<Class> view_array_class,
+ bool native_byte_order,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<5> hs(self);
+ Handle<ByteBufferViewVarHandle> bvh = hs.NewHandle(
+ ObjPtr<ByteBufferViewVarHandle>::DownCast(
+ ByteBufferViewVarHandle::StaticClass()->AllocObject(self)));
+ // Initialize super class fields
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<Class> var_type = hs.NewHandle(view_array_class->GetComponentType());
+ Handle<Class> index_type = hs.NewHandle(class_linker->FindPrimitiveClass('I'));
+ Handle<ClassLoader> boot_class_loader;
+ Handle<Class> byte_buffer_class = hs.NewHandle(
+ class_linker->FindSystemClass(self, "Ljava/nio/ByteBuffer;"));
+ InitializeVarHandle(bvh.Get(), var_type, byte_buffer_class, index_type, access_modes_bit_mask);
+ bvh->SetFieldBoolean<false>(ByteBufferViewVarHandle::NativeByteOrderOffset(),
+ native_byte_order);
+ return bvh.Get();
+ }
+
+ static int32_t AccessModesBitMask(VarHandle::AccessMode mode) {
+ return 1 << static_cast<int32_t>(mode);
+ }
+
+ template<typename... Args>
+ static int32_t AccessModesBitMask(VarHandle::AccessMode first, Args... args) {
+ return AccessModesBitMask(first) | AccessModesBitMask(args...);
+ }
+
+ // Helper to get the VarType of a VarHandle.
+ static Class* GetVarType(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return vh->GetVarType();
+ }
+
+ // Helper to get the CoordinateType0 of a VarHandle.
+ static Class* GetCoordinateType0(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return vh->GetCoordinateType0();
+ }
+
+ // Helper to get the CoordinateType1 of a VarHandle.
+ static Class* GetCoordinateType1(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return vh->GetCoordinateType1();
+ }
+
+ // Helper to get the AccessModesBitMask of a VarHandle.
+ static int32_t GetAccessModesBitMask(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return vh->GetAccessModesBitMask();
+ }
+
+ private:
+ static void InitializeVarHandle(VarHandle* vh,
+ Handle<Class> var_type,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ vh->SetFieldObject<false>(VarHandle::VarTypeOffset(), var_type.Get());
+ vh->SetField32<false>(VarHandle::AccessModesBitMaskOffset(), access_modes_bit_mask);
+ }
+
+ static void InitializeVarHandle(VarHandle* vh,
+ Handle<Class> var_type,
+ Handle<Class> coordinate_type0,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ InitializeVarHandle(vh, var_type, access_modes_bit_mask);
+ vh->SetFieldObject<false>(VarHandle::CoordinateType0Offset(), coordinate_type0.Get());
+ }
+
+ static void InitializeVarHandle(VarHandle* vh,
+ Handle<Class> var_type,
+ Handle<Class> coordinate_type0,
+ Handle<Class> coordinate_type1,
+ int32_t access_modes_bit_mask)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ InitializeVarHandle(vh, var_type, access_modes_bit_mask);
+ vh->SetFieldObject<false>(VarHandle::CoordinateType0Offset(), coordinate_type0.Get());
+ vh->SetFieldObject<false>(VarHandle::CoordinateType1Offset(), coordinate_type1.Get());
+ }
+};
+
+// Convenience method for constructing MethodType instances from
+// well-formed method descriptors.
+static MethodType* MethodTypeOf(const std::string& method_descriptor) {
+ std::vector<std::string> descriptors;
+
+ auto it = method_descriptor.cbegin();
+ if (*it++ != '(') {
+ LOG(FATAL) << "Bad descriptor: " << method_descriptor;
+ }
+
+ bool returnValueSeen = false;
+ const char* prefix = "";
+ for (; it != method_descriptor.cend() && !returnValueSeen; ++it) {
+ switch (*it) {
+ case ')':
+ descriptors.push_back(std::string(++it, method_descriptor.cend()));
+ returnValueSeen = true;
+ break;
+ case '[':
+ prefix = "[";
+ break;
+ case 'Z':
+ case 'B':
+ case 'C':
+ case 'S':
+ case 'I':
+ case 'J':
+ case 'F':
+ case 'D':
+ descriptors.push_back(prefix + std::string(it, it + 1));
+ prefix = "";
+ break;
+ case 'L': {
+ auto last = it + 1;
+ while (*last != ';') {
+ ++last;
+ }
+ descriptors.push_back(prefix + std::string(it, last + 1));
+ prefix = "";
+ it = last;
+ break;
+ }
+ default:
+ LOG(FATAL) << "Bad descriptor: " << method_descriptor;
+ }
+ }
+
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ Thread* const self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ StackHandleScope<3> hs(self);
+ int ptypes_count = static_cast<int>(descriptors.size()) - 1;
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> array_of_class = class_linker->FindArrayClass(self, &class_type);
+ Handle<ObjectArray<Class>> ptypes = hs.NewHandle(
+ ObjectArray<Class>::Alloc(Thread::Current(), array_of_class, ptypes_count));
+ Handle<mirror::ClassLoader> boot_class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
+ for (int i = 0; i < ptypes_count; ++i) {
+ ptypes->Set(i, class_linker->FindClass(self, descriptors[i].c_str(), boot_class_loader));
+ }
+ Handle<Class> rtype =
+ hs.NewHandle(class_linker->FindClass(self, descriptors.back().c_str(), boot_class_loader));
+ return MethodType::Create(self, rtype, ptypes);
+}
+
+TEST_F(VarHandleTest, InstanceFieldVarHandle) {
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ObjPtr<Object> i = BoxPrimitive(Primitive::kPrimInt, JValue::FromPrimitive<int32_t>(37));
+ ArtField* value = mirror::Class::FindField(self, i->GetClass(), "value", "I");
+ int32_t mask = AccessModesBitMask(VarHandle::AccessMode::kGet,
+ VarHandle::AccessMode::kGetAndSet,
+ VarHandle::AccessMode::kGetAndBitwiseXor);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::FieldVarHandle> fvh(hs.NewHandle(CreateFieldVarHandle(self, value, mask)));
+ EXPECT_FALSE(fvh.IsNull());
+ EXPECT_EQ(value, fvh->GetField());
+
+ // Check access modes
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetVolatile));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetVolatile));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetOpaque));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetOpaque));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchange));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetPlain));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetRelease));
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAdd));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOr));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAnd));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndAcquire));
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXor));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorAcquire));
+
+ // Check compatibility - "Get" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ }
+
+ // Check compatibility - "Set" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndSet" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
+ MethodTypeOf("(Ljava/lang/Integer;II)I")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndExchange" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ }
+
+ // Check compatibility - "GetAndUpdate" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ }
+
+ // Check synthesized method types match expected forms.
+ {
+ MethodType* get = MethodTypeOf("(Ljava/lang/Integer;)I");
+ MethodType* set = MethodTypeOf("(Ljava/lang/Integer;I)V");
+ MethodType* compareAndSet = MethodTypeOf("(Ljava/lang/Integer;II)Z");
+ MethodType* compareAndExchange = MethodTypeOf("(Ljava/lang/Integer;II)I");
+ MethodType* getAndUpdate = MethodTypeOf("(Ljava/lang/Integer;I)I");
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGet)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSet)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetVolatile)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetVolatile)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAcquire)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetRelease)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetOpaque)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetOpaque)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchange)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeAcquire)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeRelease)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetPlain)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetAcquire)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetRelease)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSet)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAdd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOr)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAnd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXor)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorAcquire)->IsExactMatch(getAndUpdate));
+ }
+}
+
+TEST_F(VarHandleTest, StaticFieldVarHandle) {
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ ObjPtr<Object> i = BoxPrimitive(Primitive::kPrimInt, JValue::FromPrimitive<int32_t>(37));
+ ArtField* value = mirror::Class::FindField(self, i->GetClass(), "MIN_VALUE", "I");
+ int32_t mask = AccessModesBitMask(VarHandle::AccessMode::kSet,
+ VarHandle::AccessMode::kGetOpaque,
+ VarHandle::AccessMode::kGetAndBitwiseAndRelease);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::FieldVarHandle> fvh(hs.NewHandle(CreateFieldVarHandle(self, value, mask)));
+ EXPECT_FALSE(fvh.IsNull());
+ EXPECT_EQ(value, fvh->GetField());
+
+ // Check access modes
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGet));
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetVolatile));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetVolatile));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetRelease));
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetOpaque));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kSetOpaque));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchange));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetPlain));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSet));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAdd));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOr));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAnd));
+ EXPECT_TRUE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndAcquire));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXor));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorRelease));
+ EXPECT_FALSE(fvh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorAcquire));
+
+ // Check compatibility - "Get" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ }
+
+ // Check compatibility - "Set" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(F)V")));
+ }
+
+ // Check compatibility - "CompareAndSet" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
+ MethodTypeOf("(II)Ljava/lang/String;")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndExchange" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(ID)I")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)D")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ }
+
+ // Check compatibility - "GetAndUpdate" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)I")));
+ EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)Z")));
+ EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ }
+
+ // Check synthesized method types match expected forms.
+ {
+ MethodType* get = MethodTypeOf("()I");
+ MethodType* set = MethodTypeOf("(I)V");
+ MethodType* compareAndSet = MethodTypeOf("(II)Z");
+ MethodType* compareAndExchange = MethodTypeOf("(II)I");
+ MethodType* getAndUpdate = MethodTypeOf("(I)I");
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGet)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSet)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetVolatile)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetVolatile)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAcquire)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetRelease)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetOpaque)->IsExactMatch(get));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetOpaque)->IsExactMatch(set));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchange)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeAcquire)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeRelease)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetPlain)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetAcquire)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetRelease)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSet)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAdd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOr)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAnd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXor)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(fvh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorAcquire)->IsExactMatch(getAndUpdate));
+ }
+}
+
+// Exercises mirror::ArrayElementVarHandle over a String[] array. All 31
+// VarHandle access modes are enabled in the bit mask, so every
+// IsAccessModeSupported() check below expects true. The test then validates
+// MethodType compatibility for each accessor template (Get / Set /
+// CompareAndSet / CompareAndExchange / GetAndUpdate) and that the MethodType
+// synthesized for every access mode matches its expected canonical form.
+TEST_F(VarHandleTest, ArrayElementVarHandle) {
+ Thread * const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(self);
+
+ // Enable all 31 access modes for this handle.
+ int32_t mask = AccessModesBitMask(VarHandle::AccessMode::kGet,
+ VarHandle::AccessMode::kSet,
+ VarHandle::AccessMode::kGetVolatile,
+ VarHandle::AccessMode::kSetVolatile,
+ VarHandle::AccessMode::kGetAcquire,
+ VarHandle::AccessMode::kSetRelease,
+ VarHandle::AccessMode::kGetOpaque,
+ VarHandle::AccessMode::kSetOpaque,
+ VarHandle::AccessMode::kCompareAndSet,
+ VarHandle::AccessMode::kCompareAndExchange,
+ VarHandle::AccessMode::kCompareAndExchangeAcquire,
+ VarHandle::AccessMode::kCompareAndExchangeRelease,
+ VarHandle::AccessMode::kWeakCompareAndSetPlain,
+ VarHandle::AccessMode::kWeakCompareAndSet,
+ VarHandle::AccessMode::kWeakCompareAndSetAcquire,
+ VarHandle::AccessMode::kWeakCompareAndSetRelease,
+ VarHandle::AccessMode::kGetAndSet,
+ VarHandle::AccessMode::kGetAndSetAcquire,
+ VarHandle::AccessMode::kGetAndSetRelease,
+ VarHandle::AccessMode::kGetAndAdd,
+ VarHandle::AccessMode::kGetAndAddAcquire,
+ VarHandle::AccessMode::kGetAndAddRelease,
+ VarHandle::AccessMode::kGetAndBitwiseOr,
+ VarHandle::AccessMode::kGetAndBitwiseOrRelease,
+ VarHandle::AccessMode::kGetAndBitwiseOrAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseAnd,
+ VarHandle::AccessMode::kGetAndBitwiseAndRelease,
+ VarHandle::AccessMode::kGetAndBitwiseAndAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseXor,
+ VarHandle::AccessMode::kGetAndBitwiseXorRelease,
+ VarHandle::AccessMode::kGetAndBitwiseXorAcquire);
+
+ // Build a String[] class and wrap it in an ArrayElementVarHandle; the
+ // accessor coordinates are (String[], int index) per the method types below.
+ ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<Class> string_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &string_class)));
+ Handle<mirror::ArrayElementVarHandle> vh(hs.NewHandle(CreateArrayElementVarHandle(self, string_array_class, mask)));
+ EXPECT_FALSE(vh.IsNull());
+
+ // Check access modes: every mode was enabled in the mask, so all are
+ // expected to be supported.
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetVolatile));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetVolatile));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetOpaque));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetOpaque));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchange));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetPlain));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAdd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOr));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAnd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXor));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorAcquire));
+
+ // Check compatibility - "Get" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
+ // A void return is accepted (value discarded); wrong coordinate types are not.
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Ljava/lang/String;")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;Ljava/lang/String;)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ }
+
+ // Check compatibility - "Set" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndSet" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
+ EXPECT_TRUE(
+ vh->IsMethodTypeCompatible(
+ access_mode,
+ MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
+ MethodTypeOf("([Ljava/lang/String;III)I")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndExchange" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;II)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ }
+
+ // Check compatibility - "GetAndUpdate" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ }
+
+ // Check synthesized method types match expected forms. Every
+ // volatile/acquire/release/weak variant is expected to share the exact
+ // MethodType of its base accessor template (get/set/compareAndSet/
+ // compareAndExchange/getAndUpdate), per the assertions below.
+ {
+ MethodType* get = MethodTypeOf("([Ljava/lang/String;I)Ljava/lang/String;");
+ MethodType* set = MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V");
+ MethodType* compareAndSet = MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z");
+ MethodType* compareAndExchange = MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;");
+ MethodType* getAndUpdate = MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;");
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGet)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSet)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetVolatile)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetVolatile)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAcquire)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetRelease)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetOpaque)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetOpaque)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchange)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeAcquire)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeRelease)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetPlain)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetAcquire)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetRelease)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSet)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAdd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOr)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAnd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXor)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorAcquire)->IsExactMatch(getAndUpdate));
+ }
+}
+
+// Exercises mirror::ByteArrayViewVarHandle viewing a byte[] as char values
+// (method types below use coordinates (byte[], int) with value type char,
+// e.g. "([BI)C"). The mask enables only every other access mode so the
+// supported/unsupported checks alternate true/false in step with the mask.
+// Also verifies GetNativeByteOrder() round-trips the constructor argument.
+TEST_F(VarHandleTest, ByteArrayViewVarHandle) {
+ Thread * const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(self);
+
+ // Deliberately enable an alternating subset of modes so both outcomes of
+ // the bitmask lookup are exercised below.
+ int32_t mask = AccessModesBitMask(VarHandle::AccessMode::kGet,
+ VarHandle::AccessMode::kGetVolatile,
+ VarHandle::AccessMode::kGetAcquire,
+ VarHandle::AccessMode::kGetOpaque,
+ VarHandle::AccessMode::kCompareAndSet,
+ VarHandle::AccessMode::kCompareAndExchangeAcquire,
+ VarHandle::AccessMode::kWeakCompareAndSetPlain,
+ VarHandle::AccessMode::kWeakCompareAndSetAcquire,
+ VarHandle::AccessMode::kGetAndSet,
+ VarHandle::AccessMode::kGetAndSetRelease,
+ VarHandle::AccessMode::kGetAndAddAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseOr,
+ VarHandle::AccessMode::kGetAndBitwiseOrAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseAndRelease,
+ VarHandle::AccessMode::kGetAndBitwiseXor,
+ VarHandle::AccessMode::kGetAndBitwiseXorAcquire);
+
+ // char_array_class conveys the char view type for the byte-array view
+ // handle — presumably the factory derives the value type from it; confirm
+ // against CreateByteArrayViewVarHandle.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::Class> char_class = class_linker->FindPrimitiveClass('C');
+ Handle<Class> char_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &char_class)));
+ const bool native_byte_order = true;
+ Handle<mirror::ByteArrayViewVarHandle> vh(hs.NewHandle(CreateByteArrayViewVarHandle(self, char_array_class, native_byte_order, mask)));
+ EXPECT_FALSE(vh.IsNull());
+ EXPECT_EQ(native_byte_order, vh->GetNativeByteOrder());
+
+ // Check access modes: expectations mirror the mask above exactly
+ // (alternating supported / unsupported).
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetVolatile));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetVolatile));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetOpaque));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetOpaque));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndSet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchange));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetPlain));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetRelease));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAdd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOr));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAnd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndRelease));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXor));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorAcquire));
+
+ // NOTE: compatibility below is checked for some modes (e.g. kSet) that the
+ // mask leaves unsupported — IsMethodTypeCompatible is exercised
+ // independently of IsAccessModeSupported.
+
+ // Check compatibility - "Get" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)C")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BC)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ }
+
+ // Check compatibility - "Set" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndSet" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
+ EXPECT_TRUE(
+ vh->IsMethodTypeCompatible(
+ access_mode,
+ MethodTypeOf("([BICC)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
+ MethodTypeOf("([BIII)I")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndExchange" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)C")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BII)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ }
+
+ // Check compatibility - "GetAndUpdate" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)C")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ }
+
+ // Check synthesized method types match expected forms. Each
+ // volatile/acquire/release/weak variant shares the MethodType of its base
+ // accessor template, per the assertions below.
+ {
+ MethodType* get = MethodTypeOf("([BI)C");
+ MethodType* set = MethodTypeOf("([BIC)V");
+ MethodType* compareAndSet = MethodTypeOf("([BICC)Z");
+ MethodType* compareAndExchange = MethodTypeOf("([BICC)C");
+ MethodType* getAndUpdate = MethodTypeOf("([BIC)C");
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGet)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSet)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetVolatile)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetVolatile)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAcquire)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetRelease)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetOpaque)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetOpaque)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchange)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeAcquire)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeRelease)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetPlain)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetAcquire)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetRelease)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSet)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAdd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOr)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAnd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXor)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorAcquire)->IsExactMatch(getAndUpdate));
+ }
+}
+
+TEST_F(VarHandleTest, ByteBufferViewVarHandle) {
+ Thread * const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(self);
+
+ int32_t mask = AccessModesBitMask(VarHandle::AccessMode::kGet,
+ VarHandle::AccessMode::kGetVolatile,
+ VarHandle::AccessMode::kGetAcquire,
+ VarHandle::AccessMode::kGetOpaque,
+ VarHandle::AccessMode::kCompareAndSet,
+ VarHandle::AccessMode::kCompareAndExchangeAcquire,
+ VarHandle::AccessMode::kWeakCompareAndSetPlain,
+ VarHandle::AccessMode::kWeakCompareAndSetAcquire,
+ VarHandle::AccessMode::kGetAndSet,
+ VarHandle::AccessMode::kGetAndSetRelease,
+ VarHandle::AccessMode::kGetAndAddAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseOr,
+ VarHandle::AccessMode::kGetAndBitwiseOrAcquire,
+ VarHandle::AccessMode::kGetAndBitwiseAndRelease,
+ VarHandle::AccessMode::kGetAndBitwiseXor,
+ VarHandle::AccessMode::kGetAndBitwiseXorAcquire);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::Class> double_class = class_linker->FindPrimitiveClass('D');
+ Handle<Class> double_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &double_class)));
+ const bool native_byte_order = false;
+ Handle<mirror::ByteBufferViewVarHandle> vh(hs.NewHandle(CreateByteBufferViewVarHandle(self, double_array_class, native_byte_order, mask)));
+ EXPECT_FALSE(vh.IsNull());
+ EXPECT_EQ(native_byte_order, vh->GetNativeByteOrder());
+
+ // Check access modes
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetVolatile));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetVolatile));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetOpaque));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kSetOpaque));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndSet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchange));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kCompareAndExchangeRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetPlain));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSet));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kWeakCompareAndSetRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSet));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndSetRelease));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAdd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndAddRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOr));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseOrAcquire));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAnd));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndRelease));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseAndAcquire));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXor));
+ EXPECT_FALSE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorRelease));
+ EXPECT_TRUE(vh->IsAccessModeSupported(VarHandle::AccessMode::kGetAndBitwiseXorAcquire));
+
+ // Check compatibility - "Get" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)D")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;D)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ }
+
+ // Check compatibility - "Set" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndSet" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
+ EXPECT_TRUE(
+ vh->IsMethodTypeCompatible(
+ access_mode,
+ MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
+ MethodTypeOf("(Ljava/nio/ByteBuffer;IDI)D")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ }
+
+ // Check compatibility - "CompareAndExchange" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)D")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;II)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ }
+
+ // Check compatibility - "GetAndUpdate" pattern
+ {
+ const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)D")));
+ EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)Z")));
+ EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ }
+
+ // Check synthesized method types match expected forms.
+ {
+ MethodType* get = MethodTypeOf("(Ljava/nio/ByteBuffer;I)D");
+ MethodType* set = MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V");
+ MethodType* compareAndSet = MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)Z");
+ MethodType* compareAndExchange = MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)D");
+ MethodType* getAndUpdate = MethodTypeOf("(Ljava/nio/ByteBuffer;ID)D");
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGet)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSet)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetVolatile)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetVolatile)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAcquire)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetRelease)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetOpaque)->IsExactMatch(get));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kSetOpaque)->IsExactMatch(set));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchange)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeAcquire)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kCompareAndExchangeRelease)->IsExactMatch(compareAndExchange));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetPlain)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSet)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetAcquire)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kWeakCompareAndSetRelease)->IsExactMatch(compareAndSet));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSet)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndSetRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAdd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndAddRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOr)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseOrAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAnd)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseAndAcquire)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXor)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorRelease)->IsExactMatch(getAndUpdate));
+ EXPECT_TRUE(vh->GetMethodTypeForAccessMode(self, VarHandle::AccessMode::kGetAndBitwiseXorAcquire)->IsExactMatch(getAndUpdate));
+ }
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 7823413c09..32201d97dd 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -44,7 +44,8 @@ namespace art {
using android::base::StringPrintf;
-static constexpr uint64_t kLongWaitMs = 100;
+static constexpr uint64_t kDebugThresholdFudgeFactor = kIsDebugBuild ? 10 : 1;
+static constexpr uint64_t kLongWaitMs = 100 * kDebugThresholdFudgeFactor;
/*
* Every Object has a monitor associated with it, but not every Object is actually locked. Even
@@ -78,8 +79,12 @@ uint32_t Monitor::stack_dump_lock_profiling_threshold_ = 0;
void Monitor::Init(uint32_t lock_profiling_threshold,
uint32_t stack_dump_lock_profiling_threshold) {
- lock_profiling_threshold_ = lock_profiling_threshold;
- stack_dump_lock_profiling_threshold_ = stack_dump_lock_profiling_threshold;
+ // It isn't great to always include the debug build fudge factor for command-
+ // line driven arguments, but it's easier to adjust here than in the build.
+ lock_profiling_threshold_ =
+ lock_profiling_threshold * kDebugThresholdFudgeFactor;
+ stack_dump_lock_profiling_threshold_ =
+ stack_dump_lock_profiling_threshold * kDebugThresholdFudgeFactor;
}
Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
@@ -453,7 +458,7 @@ void Monitor::Lock(Thread* self) {
// Acquire thread-list lock to find thread and keep it from dying until we've got all
// the info we need.
{
- MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
+ Locks::thread_list_lock_->ExclusiveLock(Thread::Current());
// Re-find the owner in case the thread got killed.
Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
@@ -475,9 +480,15 @@ void Monitor::Lock(Thread* self) {
std::ostringstream oss;
};
CollectStackTrace owner_trace;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
+ // execution.
original_owner->RequestSynchronousCheckpoint(&owner_trace);
owner_stack_dump = owner_trace.oss.str();
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
// This is all the data we need. Now drop the thread-list lock, it's OK for the
// owner to go away now.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index e75d097220..22355638cd 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -20,9 +20,11 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker.h"
+#include <class_loader_context.h>
#include "common_throws.h"
#include "compiler_filter.h"
#include "dex_file-inl.h"
@@ -33,8 +35,8 @@
#include "mirror/string.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
@@ -459,6 +461,7 @@ static jint GetDexOptNeeded(JNIEnv* env,
const char* filename,
const char* instruction_set,
const char* compiler_filter_name,
+ const char* class_loader_context,
bool profile_changed,
bool downgrade) {
if ((filename == nullptr) || !OS::FileExists(filename)) {
@@ -470,7 +473,7 @@ static jint GetDexOptNeeded(JNIEnv* env,
}
const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
- if (target_instruction_set == kNone) {
+ if (target_instruction_set == InstructionSet::kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set));
env->ThrowNew(iae.get(), message.c_str());
@@ -485,6 +488,19 @@ static jint GetDexOptNeeded(JNIEnv* env,
return -1;
}
+ std::unique_ptr<ClassLoaderContext> context = nullptr;
+ if (class_loader_context != nullptr) {
+ context = ClassLoaderContext::Create(class_loader_context);
+
+ if (context == nullptr) {
+ ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
+ std::string message(StringPrintf("Class loader context '%s' is invalid.",
+ class_loader_context));
+ env->ThrowNew(iae.get(), message.c_str());
+ return -1;
+ }
+ }
+
// TODO: Verify the dex location is well formed, and throw an IOException if
// not?
@@ -495,8 +511,10 @@ static jint GetDexOptNeeded(JNIEnv* env,
return OatFileAssistant::kNoDexOptNeeded;
}
- // TODO(calin): Extend DexFile.getDexOptNeeded to accept the class loader context. b/62269291.
- return oat_file_assistant.GetDexOptNeeded(filter, profile_changed, downgrade);
+ return oat_file_assistant.GetDexOptNeeded(filter,
+ profile_changed,
+ downgrade,
+ context.get());
}
static jstring DexFile_getDexFileStatus(JNIEnv* env,
@@ -515,7 +533,7 @@ static jstring DexFile_getDexFileStatus(JNIEnv* env,
const InstructionSet target_instruction_set = GetInstructionSetFromString(
instruction_set.c_str());
- if (target_instruction_set == kNone) {
+ if (target_instruction_set == InstructionSet::kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
env->ThrowNew(iae.get(), message.c_str());
@@ -532,6 +550,7 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env,
jstring javaFilename,
jstring javaInstructionSet,
jstring javaTargetCompilerFilter,
+ jstring javaClassLoaderContext,
jboolean newProfile,
jboolean downgrade) {
ScopedUtfChars filename(env, javaFilename);
@@ -549,10 +568,16 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env,
return -1;
}
+ NullableScopedUtfChars class_loader_context(env, javaClassLoaderContext);
+ if (env->ExceptionCheck()) {
+ return -1;
+ }
+
return GetDexOptNeeded(env,
filename.c_str(),
instruction_set.c_str(),
target_compiler_filter.c_str(),
+ class_loader_context.c_str(),
newProfile == JNI_TRUE,
downgrade == JNI_TRUE);
}
@@ -681,7 +706,7 @@ static jobjectArray DexFile_getDexFileOutputPaths(JNIEnv* env,
const InstructionSet target_instruction_set = GetInstructionSetFromString(
instruction_set.c_str());
- if (target_instruction_set == kNone) {
+ if (target_instruction_set == InstructionSet::kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
env->ThrowNew(iae.get(), message.c_str());
@@ -731,7 +756,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(DexFile, getDexOptNeeded,
- "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZ)I"),
+ "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZ)I"),
NATIVE_METHOD(DexFile, openDexFileNative,
"(Ljava/lang/String;"
"Ljava/lang/String;"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 70dd5cb56d..2663bea344 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -40,8 +40,8 @@
#include "mirror/class.h"
#include "mirror/object_array-inl.h"
#include "native_util.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "scoped_fast_native_object_access-inl.h"
#include "trace.h"
#include "well_known_classes.h"
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4db9feb518..2d1f886896 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -22,12 +22,7 @@
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
#include <limits.h>
-#include "nativehelper/ScopedUtfChars.h"
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "nativehelper/toStringArray.h"
-#pragma GCC diagnostic pop
+#include "nativehelper/scoped_utf_chars.h"
#include "android-base/stringprintf.h"
@@ -53,11 +48,13 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "mirror/object-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
+#include "well_known_classes.h"
namespace art {
@@ -166,7 +163,27 @@ static jboolean VMRuntime_isNativeDebuggable(JNIEnv*, jobject) {
}
static jobjectArray VMRuntime_properties(JNIEnv* env, jobject) {
- return toStringArray(env, Runtime::Current()->GetProperties());
+ DCHECK(WellKnownClasses::java_lang_String != nullptr);
+
+ const std::vector<std::string>& properties = Runtime::Current()->GetProperties();
+ ScopedLocalRef<jobjectArray> ret(env,
+ env->NewObjectArray(static_cast<jsize>(properties.size()),
+ WellKnownClasses::java_lang_String,
+ nullptr /* initial element */));
+ if (ret == nullptr) {
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
+ }
+ for (size_t i = 0; i != properties.size(); ++i) {
+ ScopedLocalRef<jstring> str(env, env->NewStringUTF(properties[i].c_str()));
+ if (str == nullptr) {
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
+ }
+ env->SetObjectArrayElement(ret.get(), static_cast<jsize>(i), str.get());
+ DCHECK(!env->ExceptionCheck());
+ }
+ return ret.release();
}
// This is for backward compatibility with dalvik which returned the
@@ -607,7 +624,7 @@ static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring jav
return JNI_FALSE;
}
InstructionSet isa = GetInstructionSetFromString(instruction_set.c_str());
- if (isa == kNone) {
+ if (isa == InstructionSet::kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
env->ThrowNew(iae.get(), message.c_str());
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e40a071223..a7bee39a81 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -22,14 +22,14 @@
#include "arch/instruction_set.h"
#include "art_method-inl.h"
+#include "base/logging.h"
#include "debugger.h"
#include "java_vm_ext.h"
#include "jit/jit.h"
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/JNIHelp.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "non_debuggable_classes.h"
#include "oat_file.h"
#include "oat_file_manager.h"
@@ -49,7 +49,8 @@ namespace art {
// Set to true to always determine the non-debuggable classes even if we would not allow a debugger
// to actually attach.
-static constexpr bool kAlwaysCollectNonDebuggableClasses = kIsDebugBuild;
+static bool kAlwaysCollectNonDebuggableClasses =
+ RegisterRuntimeDebugFlag(&kAlwaysCollectNonDebuggableClasses);
using android::base::StringPrintf;
@@ -331,7 +332,7 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
Runtime::NativeBridgeAction action = Runtime::NativeBridgeAction::kUnload;
- if (isa != kNone && isa != kRuntimeISA) {
+ if (isa != InstructionSet::kNone && isa != kRuntimeISA) {
action = Runtime::NativeBridgeAction::kInitialize;
}
Runtime::Current()->InitNonZygoteOrPostFork(
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1a19940993..9359ffc7fd 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -35,8 +35,8 @@
#include "mirror/string-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nth_caller_visitor.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 67f7c51465..9295ff7071 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
#include "mirror/string-inl.h"
#include "mirror/string.h"
#include "native_util.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "verify_object.h"
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 2db9a5cc22..136a02f8f6 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -22,8 +22,8 @@
#include "mirror/string.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 94007ffa1e..a717264bcb 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -22,7 +22,7 @@
#include "monitor.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 413149c510..5130ad50e4 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -23,8 +23,8 @@
#include "mirror/object-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "obj_ptr.h"
#include "scoped_fast_native_object_access-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index 9743c9413d..f3aba2575b 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -22,7 +22,7 @@
#include "mirror/string-inl.h"
#include "mirror/string.h"
#include "native_util.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "nativehelper/jni_macros.h"
#include "scoped_fast_native_object_access-inl.h"
#include "unicode/utf16.h"
diff --git a/runtime/native/native_util.h b/runtime/native/native_util.h
index 593b3ca444..784dba319e 100644
--- a/runtime/native/native_util.h
+++ b/runtime/native/native_util.h
@@ -21,7 +21,7 @@
#include "android-base/logging.h"
#include "base/macros.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
namespace art {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index c3e74bd112..f8f4b1f0ad 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -21,7 +21,7 @@
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 8c42973509..f5057b013a 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -16,6 +16,7 @@
#include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "debugger.h"
@@ -23,8 +24,8 @@
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
#include "thread_list.h"
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 7e16357376..f166714b79 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -40,6 +40,7 @@
#include "android-base/stringprintf.h"
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/unix_file/fd_file.h"
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index 871ffba2a4..7db199cd06 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -19,7 +19,7 @@
#include "base/logging.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "thread-current-inl.h"
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 21e20e9b74..39dc8da5c3 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -94,7 +94,7 @@ OatHeader::OatHeader(InstructionSet instruction_set,
memcpy(magic_, kOatMagic, sizeof(kOatMagic));
memcpy(version_, kOatVersion, sizeof(kOatVersion));
- CHECK_NE(instruction_set, kNone);
+ CHECK_NE(instruction_set, InstructionSet::kNone);
// Flatten the map. Will also update variable_size_data_size_.
Flatten(variable_data);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 1269dcad93..d64986e76d 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -38,6 +38,7 @@
#include "art_method.h"
#include "base/bit_vector.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index a7fe9b1205..97b2aecd82 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -23,6 +23,7 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker.h"
@@ -70,15 +71,34 @@ std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStat
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
+ bool load_executable)
+ : OatFileAssistant(dex_location,
+ isa, load_executable,
+ -1 /* vdex_fd */,
+ -1 /* oat_fd */,
+ -1 /* zip_fd */) {}
+
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
bool load_executable,
int vdex_fd,
- int oat_fd)
+ int oat_fd,
+ int zip_fd)
: isa_(isa),
load_executable_(load_executable),
odex_(this, /*is_oat_location*/ false),
- oat_(this, /*is_oat_location*/ true) {
+ oat_(this, /*is_oat_location*/ true),
+ zip_fd_(zip_fd) {
CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
+  if (zip_fd < 0) {
+    CHECK_LE(oat_fd, 0) << "zip_fd must be provided with valid oat_fd. zip_fd=" << zip_fd
+                        << " oat_fd=" << oat_fd;
+    CHECK_LE(vdex_fd, 0) << "zip_fd must be provided with valid vdex_fd. zip_fd=" << zip_fd
+                         << " vdex_fd=" << vdex_fd;;
+  }
+
// Try to get the realpath for the dex location.
//
// This is OK with respect to dalvik cache naming scheme because we never
@@ -112,18 +132,20 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
std::string error_msg;
std::string odex_file_name;
if (DexLocationToOdexFilename(dex_location_, isa_, &odex_file_name, &error_msg)) {
- odex_.Reset(odex_file_name, vdex_fd, oat_fd);
+ odex_.Reset(odex_file_name, UseFdToReadFiles(), vdex_fd, oat_fd);
} else {
LOG(WARNING) << "Failed to determine odex file name: " << error_msg;
}
- // Get the oat filename.
- std::string oat_file_name;
- if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
- oat_.Reset(oat_file_name);
- } else {
- LOG(WARNING) << "Failed to determine oat file name for dex location "
- << dex_location_ << ": " << error_msg;
+ if (!UseFdToReadFiles()) {
+ // Get the oat filename.
+ std::string oat_file_name;
+ if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
+ oat_.Reset(oat_file_name, false /* use_fd */);
+ } else {
+ LOG(WARNING) << "Failed to determine oat file name for dex location "
+ << dex_location_ << ": " << error_msg;
+ }
}
// Check if the dex directory is writable.
@@ -133,9 +155,11 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
size_t pos = dex_location_.rfind('/');
if (pos == std::string::npos) {
LOG(WARNING) << "Failed to determine dex file parent directory: " << dex_location_;
- } else {
+ } else if (!UseFdToReadFiles()) {
+ // We cannot test for parent access when using file descriptors. That's ok
+ // because in this case we will always pick the odex file anyway.
std::string parent = dex_location_.substr(0, pos);
- if (access(parent.c_str(), W_OK) == 0 || oat_fd > 0) {
+ if (access(parent.c_str(), W_OK) == 0) {
dex_parent_writable_ = true;
} else {
VLOG(oat) << "Dex parent of " << dex_location_ << " is not writable: " << strerror(errno);
@@ -150,6 +174,10 @@ OatFileAssistant::~OatFileAssistant() {
}
}
+bool OatFileAssistant::UseFdToReadFiles() {
+ return zip_fd_ >= 0;
+}
+
bool OatFileAssistant::IsInBootClassPath() {
// Note: We check the current boot class path, regardless of the ISA
// specified by the user. This is okay, because the boot class path should
@@ -236,6 +264,9 @@ OatFileAssistant::ResultOfAttemptToUpdate
OatFileAssistant::MakeUpToDate(bool profile_changed,
ClassLoaderContext* class_loader_context,
std::string* error_msg) {
+ // The method doesn't use zip_fd_ and directly opens dex files at dex_locations_.
+ CHECK_EQ(-1, zip_fd_) << "MakeUpToDate should not be called with zip_fd";
+
CompilerFilter::Filter target;
if (!GetRuntimeCompilerFilterOption(&target, error_msg)) {
return kUpdateNotAttempted;
@@ -868,7 +899,8 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
std::string error_msg;
if (DexFileLoader::GetMultiDexChecksums(dex_location_.c_str(),
&cached_required_dex_checksums_,
- &error_msg)) {
+ &error_msg,
+ zip_fd_)) {
required_dex_checksums_found_ = true;
has_original_dex_files_ = true;
} else {
@@ -931,7 +963,7 @@ const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
// TODO(calin): Document the side effects of class loading when
// running dalvikvm command line.
- if (dex_parent_writable_) {
+ if (dex_parent_writable_ || UseFdToReadFiles()) {
// If the parent of the dex file is writable it means that we can
// create the odex file. In this case we unconditionally pick the odex
// as the best oat file. This corresponds to the regular use case when
@@ -1020,26 +1052,28 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() {
std::string error_msg;
std::string vdex_filename = GetVdexFilename(filename_);
std::unique_ptr<VdexFile> vdex;
- if (vdex_fd_ == -1) {
+ if (use_fd_) {
+ if (vdex_fd_ >= 0) {
+ struct stat s;
+ int rc = TEMP_FAILURE_RETRY(fstat(vdex_fd_, &s));
+ if (rc == -1) {
+ error_msg = StringPrintf("Failed getting length of the vdex file %s.", strerror(errno));
+ } else {
+ vdex = VdexFile::Open(vdex_fd_,
+ s.st_size,
+ vdex_filename,
+ false /*writable*/,
+ false /*low_4gb*/,
+ false /* unquicken */,
+ &error_msg);
+ }
+ }
+ } else {
vdex = VdexFile::Open(vdex_filename,
false /*writeable*/,
false /*low_4gb*/,
false /*unquicken*/,
&error_msg);
- } else {
- struct stat s;
- int rc = TEMP_FAILURE_RETRY(fstat(vdex_fd_, &s));
- if (rc == -1) {
- PLOG(WARNING) << "Failed getting length of vdex file";
- } else {
- vdex = VdexFile::Open(vdex_fd_,
- s.st_size,
- vdex_filename,
- false /*writable*/,
- false /*low_4gb*/,
- false /* unquicken */,
- &error_msg);
- }
}
if (vdex == nullptr) {
status_ = kOatCannotOpen;
@@ -1115,16 +1149,18 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
load_attempted_ = true;
if (filename_provided_) {
std::string error_msg;
- if (oat_fd_ != -1 && vdex_fd_ != -1) {
- file_.reset(OatFile::Open(vdex_fd_,
- oat_fd_,
- filename_.c_str(),
- nullptr,
- nullptr,
- oat_file_assistant_->load_executable_,
- false /* low_4gb */,
- oat_file_assistant_->dex_location_.c_str(),
- &error_msg));
+ if (use_fd_) {
+ if (oat_fd_ >= 0 && vdex_fd_ >= 0) {
+ file_.reset(OatFile::Open(vdex_fd_,
+ oat_fd_,
+ filename_.c_str(),
+ nullptr,
+ nullptr,
+ oat_file_assistant_->load_executable_,
+ false /* low_4gb */,
+ oat_file_assistant_->dex_location_.c_str(),
+ &error_msg));
+ }
} else {
file_.reset(OatFile::Open(filename_.c_str(),
filename_.c_str(),
@@ -1168,12 +1204,13 @@ bool OatFileAssistant::OatFileInfo::ClassLoaderContextIsOkay(ClassLoaderContext*
const OatFile* file = GetFile();
if (file == nullptr) {
- return false;
+ // No oat file means we have nothing to verify.
+ return true;
}
- size_t dir_index = file->GetLocation().rfind('/');
+ size_t dir_index = oat_file_assistant_->dex_location_.rfind('/');
std::string classpath_dir = (dir_index != std::string::npos)
- ? file->GetLocation().substr(0, dir_index)
+ ? oat_file_assistant_->dex_location_.substr(0, dir_index)
: "";
if (!context->OpenDexFiles(oat_file_assistant_->isa_, classpath_dir)) {
@@ -1201,10 +1238,11 @@ void OatFileAssistant::OatFileInfo::Reset() {
status_attempted_ = false;
}
-void OatFileAssistant::OatFileInfo::Reset(const std::string& filename, int vdex_fd,
+void OatFileAssistant::OatFileInfo::Reset(const std::string& filename, bool use_fd, int vdex_fd,
int oat_fd) {
filename_provided_ = true;
filename_ = filename;
+ use_fd_ = use_fd;
vdex_fd_ = vdex_fd;
oat_fd_ = oat_fd;
Reset();
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 0f74ca4b02..6c01c1e880 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -121,9 +121,17 @@ class OatFileAssistant {
// executable code for this dex location.
OatFileAssistant(const char* dex_location,
const InstructionSet isa,
+ bool load_executable);
+
+ // Similar to this(const char*, const InstructionSet, bool), however, if a valid zip_fd is
+ // provided, vdex, oat, and zip files will be read from vdex_fd, oat_fd and zip_fd respectively.
+ // Otherwise, dex_location will be used to construct necessary filenames.
+ OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
bool load_executable,
- int vdex_fd = -1,
- int oat_fd = -1);
+ int vdex_fd,
+ int oat_fd,
+ int zip_fd);
~OatFileAssistant();
@@ -351,7 +359,7 @@ class OatFileAssistant {
// Clear any cached information and switch to getting info about the oat
// file with the given filename.
- void Reset(const std::string& filename, int vdex_fd = -1, int oat_fd = -1);
+ void Reset(const std::string& filename, bool use_fd, int vdex_fd = -1, int oat_fd = -1);
// Release the loaded oat file for runtime use.
// Returns null if the oat file hasn't been loaded or is out of date.
@@ -390,6 +398,7 @@ class OatFileAssistant {
int oat_fd_ = -1;
int vdex_fd_ = -1;
+ bool use_fd_ = false;
bool load_attempted_ = false;
std::unique_ptr<OatFile> file_;
@@ -420,6 +429,12 @@ class OatFileAssistant {
// Return info for the best oat file.
OatFileInfo& GetBestInfo();
+ // Returns true when vdex/oat/odex files should be read from file descriptors.
+ // The method checks the value of zip_fd_, and if the value is valid, returns
+ // true. This is required to have a deterministic behavior around how different
+ // files are being read.
+ bool UseFdToReadFiles();
+
// Returns true if the dex checksums in the given vdex file are up to date
// with respect to the dex location. If the dex checksums are not up to
// date, error_msg is updated with a message describing the problem.
@@ -467,7 +482,7 @@ class OatFileAssistant {
// In a properly constructed OatFileAssistant object, isa_ should be either
// the 32 or 64 bit variant for the current device.
- const InstructionSet isa_ = kNone;
+ const InstructionSet isa_ = InstructionSet::kNone;
// Whether we will attempt to load oat files executable.
bool load_executable_ = false;
@@ -482,6 +497,9 @@ class OatFileAssistant {
OatFileInfo odex_;
OatFileInfo oat_;
+ // File descriptor corresponding to apk, dex file, or zip.
+ int zip_fd_;
+
// Cached value of the image info.
// Use the GetImageInfo method rather than accessing these directly.
// TODO: The image info should probably be moved out of the oat file
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d99036df7e..bd500ebe77 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -241,12 +241,14 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
vdex_fd.get(),
- odex_fd.get());
+ odex_fd.get(),
+ zip_fd.get());
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
@@ -262,12 +264,12 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
-// Case: Passing valid odex fd, however, invalid fd for vdex with
-// the dex file.
-// Expect: The status is kDex2oatFromScratch.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
+// Case: Passing invalid odex fd and valid vdex and zip fds.
+// Expect: The status should be kDex2OatForBootImage.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
+ std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
@@ -277,26 +279,31 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
false,
false);
- android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
+ android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
- -1,
- odex_fd.get());
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ vdex_fd.get(),
+ -1 /* oat_fd */,
+ zip_fd.get());
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
- EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+ EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
-// Case: Passing valid vdex fd, however, invalid fd for odex with
-// the dex file.
-// Expect: The status is kDex2oatFromScratch.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
+// Case: Passing invalid vdex fd and valid odex and zip fds.
+// Expect: The status should be kDex2OatFromScratch.
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
- std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
@@ -306,36 +313,38 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
false,
false);
- android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
+ android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
- vdex_fd.get(),
- -1);
- // Even though the vdex file is up to date, because we don't have the oat
- // file, we can't know that the vdex depends on the boot image and is up to
- // date with respect to the boot image. Instead we must assume the vdex file
- // depends on the boot image and is out of date with respect to the boot
- // image.
- EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
+ -1 /* vdex_fd */,
+ odex_fd.get(),
+ zip_fd.get());
+
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
- EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
+ EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
}
-// Case: Passing invalid vdex and odex fd with the dex file.
+// Case: Passing invalid vdex and odex fd with valid zip fd.
// Expect: The status is kDex2oatFromScratch.
TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
Copy(GetDexSrc1(), dex_location);
+ android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
- -1,
- -1);
+ -1 /* vdex_fd */,
+ -1 /* oat_fd */,
+ zip_fd);
EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -1328,17 +1337,17 @@ TEST(OatFileAssistantUtilsTest, DexLocationToOdexFilename) {
std::string odex_file;
EXPECT_TRUE(OatFileAssistant::DexLocationToOdexFilename(
- "/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
+ "/foo/bar/baz.jar", InstructionSet::kArm, &odex_file, &error_msg)) << error_msg;
EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_TRUE(OatFileAssistant::DexLocationToOdexFilename(
- "/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
+ "/foo/bar/baz.funnyext", InstructionSet::kArm, &odex_file, &error_msg)) << error_msg;
EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
EXPECT_FALSE(OatFileAssistant::DexLocationToOdexFilename(
- "nopath.jar", kArm, &odex_file, &error_msg));
+ "nopath.jar", InstructionSet::kArm, &odex_file, &error_msg));
EXPECT_FALSE(OatFileAssistant::DexLocationToOdexFilename(
- "/foo/bar/baz_noext", kArm, &odex_file, &error_msg));
+ "/foo/bar/baz_noext", InstructionSet::kArm, &odex_file, &error_msg));
}
// Verify the dexopt status values from dalvik.system.DexFile
@@ -1466,6 +1475,33 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
default_filter, false, false, updated_context.get()));
}
+TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string context_location = GetScratchDir() + "/ContextDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+ Copy(GetDexSrc2(), context_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ const CompilerFilter::Filter default_filter =
+ OatFileAssistant::kDefaultCompilerFilterForDexLoading;
+ std::string error_msg;
+ std::string context_str = "PCL[" + context_location + "]";
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
+
+ int status = oat_file_assistant.MakeUpToDate(false, context.get(), &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+ // A relative context simulates a dependent split context.
+ std::unique_ptr<ClassLoaderContext> relative_context =
+ ClassLoaderContext::Create("PCL[ContextDex.jar]");
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ default_filter, false, false, relative_context.get()));
+}
+
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 1e7cf723dc..ee35d9cd12 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -25,6 +25,7 @@
#include "art_field-inl.h"
#include "base/bit_vector-inl.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -597,8 +598,12 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(
- dex_location, dex_location, kVerifyChecksum, /*out*/ &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(dex_location,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ /*out*/ &error_msg,
+ &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
+ " because: " + error_msg);
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 3625b9e7a7..4443255f64 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -136,8 +136,8 @@ class PACKED(4) OatQuickMethodHeader {
bool Contains(uintptr_t pc) const {
uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
- static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
- if (kRuntimeISA == kArm) {
+ static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == InstructionSet::kArm) {
// On Thumb-2, the pc is offset by one.
code_start++;
}
@@ -149,8 +149,8 @@ class PACKED(4) OatQuickMethodHeader {
// (not `kThumb2`), *but* we always generate code for the Thumb-2
// instruction set anyway. Thumb-2 requires the entrypoint to be of
// offset 1.
- static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
- return (kRuntimeISA == kArm)
+ static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+ return (kRuntimeISA == InstructionSet::kArm)
? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
: code_;
}
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 71d7b6c34d..cc09a776b8 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -18,6 +18,7 @@
#include <sstream>
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/stringpiece.h"
#include "debugger.h"
@@ -368,7 +369,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "imageinstructionset") {
const char* isa_str = reinterpret_cast<const char*>(options[i].second);
auto&& image_isa = GetInstructionSetFromString(isa_str);
- if (image_isa == kNone) {
+ if (image_isa == InstructionSet::kNone) {
Usage("%s is not a valid instruction set.", isa_str);
return false;
}
diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc
index c2b34c859e..6fa9b3424d 100644
--- a/runtime/prebuilt_tools_test.cc
+++ b/runtime/prebuilt_tools_test.cc
@@ -50,7 +50,7 @@ TEST_F(PrebuiltToolsTest, CheckHostTools) {
TEST_F(PrebuiltToolsTest, CheckTargetTools) {
// Other prebuilts are missing from the build server's repo manifest.
- InstructionSet isas[] = { kThumb2 }; // NOLINT
+ InstructionSet isas[] = { InstructionSet::kThumb2 }; // NOLINT
for (InstructionSet isa : isas) {
std::string tools_dir = GetAndroidTargetToolsDir(isa);
if (tools_dir.empty()) {
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index b055bf94d8..3ad3a4b6a9 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -188,7 +188,7 @@ TEST_F(ProxyTest, ProxyFieldHelper) {
ArtField* field = &static_fields->At(0);
EXPECT_STREQ("interfaces", field->GetName());
EXPECT_STREQ("[Ljava/lang/Class;", field->GetTypeDescriptor());
- EXPECT_OBJ_PTR_EQ(interfacesFieldClass.Get(), field->GetType<true>());
+ EXPECT_OBJ_PTR_EQ(interfacesFieldClass.Get(), field->ResolveType());
std::string temp;
EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
EXPECT_FALSE(field->IsPrimitiveType());
@@ -197,7 +197,7 @@ TEST_F(ProxyTest, ProxyFieldHelper) {
field = &static_fields->At(1);
EXPECT_STREQ("throws", field->GetName());
EXPECT_STREQ("[[Ljava/lang/Class;", field->GetTypeDescriptor());
- EXPECT_OBJ_PTR_EQ(throwsFieldClass.Get(), field->GetType<true>());
+ EXPECT_OBJ_PTR_EQ(throwsFieldClass.Get(), field->ResolveType());
EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
EXPECT_FALSE(field->IsPrimitiveType());
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f28f0cabe2..9683cedd4d 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -28,7 +28,7 @@
#include "mirror/class-inl.h"
#include "mirror/executable.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change-inl.h"
#include "stack_reference.h"
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index fa2f1e5793..7794872c83 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -24,7 +24,7 @@
#include "common_compiler_test.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7f2f7895db..f09b6c9825 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -60,6 +60,7 @@
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -134,9 +135,7 @@
#include "native/sun_misc_Unsafe.h"
#include "native_bridge_art_interface.h"
#include "native_stack_dump.h"
-#include "nativehelper/JniConstants.h"
-#include "nativehelper/JniConstants-priv.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "object_callbacks.h"
@@ -145,6 +144,7 @@
#include "quick/quick_method_frame_info.h"
#include "reflection.h"
#include "runtime_callbacks.h"
+#include "runtime_intrinsics.h"
#include "runtime_options.h"
#include "scoped_thread_state_change-inl.h"
#include "sigchain.h"
@@ -208,7 +208,7 @@ Runtime::Runtime()
: resolution_method_(nullptr),
imt_conflict_method_(nullptr),
imt_unimplemented_method_(nullptr),
- instruction_set_(kNone),
+ instruction_set_(InstructionSet::kNone),
compiler_callbacks_(nullptr),
is_zygote_(false),
must_relocate_(false),
@@ -412,10 +412,6 @@ Runtime::~Runtime() {
// instance. We rely on a small initialization order issue in Runtime::Start() that requires
// elements of WellKnownClasses to be null, see b/65500943.
WellKnownClasses::Clear();
-
- // Ensure that libnativehelper caching is invalidated, in case a new runtime is to be brought
- // up later.
- android::ClearJniConstantsCache();
}
struct AbortState {
@@ -513,6 +509,10 @@ void Runtime::Abort(const char* msg) {
UNUSED(old_value);
#endif
+#ifdef ART_TARGET_ANDROID
+ android_set_abort_message(msg);
+#endif
+
// Ensure that we don't have multiple threads trying to abort at once,
// which would result in significantly worse diagnostics.
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
@@ -740,6 +740,11 @@ bool Runtime::Start() {
InitNativeMethods();
}
+ // IntializeIntrinsics needs to be called after the WellKnownClasses::Init in InitNativeMethods
+ // because in checking the invocation types of intrinsic methods ArtMethod::GetInvokeType()
+ // needs the SignaturePolymorphic annotation class which is initialized in WellKnownClasses::Init.
+ InitializeIntrinsics();
+
// Initialize well known thread group values that may be accessed threads while attaching.
InitThreadGroups(self);
@@ -1026,7 +1031,12 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFileLoader::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
+ if (!DexFileLoader::Open(dex_filename,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ &error_msg,
+ dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
@@ -1242,13 +1252,13 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// Change the implicit checks flags based on runtime architecture.
switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- case kX86:
- case kArm64:
- case kX86_64:
- case kMips:
- case kMips64:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
+ case InstructionSet::kX86:
+ case InstructionSet::kArm64:
+ case InstructionSet::kX86_64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
implicit_null_checks_ = true;
// Installing stack protection does not play well with valgrind.
implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
@@ -1548,11 +1558,7 @@ void Runtime::InitNativeMethods() {
// Must be in the kNative state for calling native methods (JNI_OnLoad code).
CHECK_EQ(self->GetState(), kNative);
- // First set up JniConstants, which is used by both the runtime's built-in native
- // methods and libcore.
- JniConstants::init(env);
-
- // Then set up the native methods provided by the runtime itself.
+ // Set up the native methods provided by the runtime itself.
RegisterRuntimeNativeMethods(env);
// Initialize classes used in JNI. The initialization requires runtime native
@@ -1963,7 +1969,7 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() {
auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
- DCHECK_NE(instruction_set_, kNone);
+ DCHECK_NE(instruction_set_, InstructionSet::kNone);
DCHECK(method->IsRuntimeMethod());
return method;
}
@@ -2020,32 +2026,32 @@ void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
instruction_set_ = instruction_set;
- if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
+ if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
}
- } else if (instruction_set_ == kMips) {
+ } else if (instruction_set_ == InstructionSet::kMips) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
}
- } else if (instruction_set_ == kMips64) {
+ } else if (instruction_set_ == InstructionSet::kMips64) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
}
- } else if (instruction_set_ == kX86) {
+ } else if (instruction_set_ == InstructionSet::kX86) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
}
- } else if (instruction_set_ == kX86_64) {
+ } else if (instruction_set_ == InstructionSet::kX86_64) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
}
- } else if (instruction_set_ == kArm64) {
+ } else if (instruction_set_ == InstructionSet::kArm64) {
for (int i = 0; i != kCalleeSaveSize; ++i) {
CalleeSaveType type = static_cast<CalleeSaveType>(i);
callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index f164f7c8ec..339fe822fd 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -43,6 +43,17 @@ void RuntimeCallbacks::RemoveMethodInspectionCallback(MethodInspectionCallback*
Remove(cb, &method_inspection_callbacks_);
}
+bool RuntimeCallbacks::IsMethodSafeToJit(ArtMethod* m) {
+ for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
+ if (!cb->IsMethodSafeToJit(m)) {
+ DCHECK(cb->IsMethodBeingInspected(m))
+ << "Contract requires that !IsMethodSafeToJit(m) -> IsMethodBeingInspected(m)";
+ return false;
+ }
+ }
+ return true;
+}
+
bool RuntimeCallbacks::IsMethodBeingInspected(ArtMethod* m) {
for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
if (cb->IsMethodBeingInspected(m)) {
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index c9360491bb..c1ba9643a7 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -104,6 +104,11 @@ class MethodInspectionCallback {
// Returns true if the method is being inspected currently and the runtime should not modify it in
// potentially dangerous ways (i.e. replace with compiled version, JIT it, etc).
virtual bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+ // Returns true if the method is safe to Jit, false otherwise.
+ // Note that '!IsMethodSafeToJit(m) implies IsMethodBeingInspected(m)'. That is that if this
+ // method returns false IsMethodBeingInspected must return true.
+ virtual bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
class RuntimeCallbacks {
@@ -167,6 +172,11 @@ class RuntimeCallbacks {
// on by some code.
bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns false if some MethodInspectionCallback indicates the method cannot be safetly jitted
+ // (which implies that it is being Inspected). Returns true otherwise. If it returns false the
+ // entrypoint should not be changed to JITed code.
+ bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
void AddMethodInspectionCallback(MethodInspectionCallback* cb)
REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveMethodInspectionCallback(MethodInspectionCallback* cb)
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ef172586cf..0b69851a55 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -38,7 +38,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "monitor.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 940e4611f6..eb69d91dad 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -25,6 +25,7 @@
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
diff --git a/runtime/runtime_intrinsics.cc b/runtime/runtime_intrinsics.cc
new file mode 100644
index 0000000000..f710ebeb4c
--- /dev/null
+++ b/runtime/runtime_intrinsics.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_intrinsics.h"
+
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "intrinsics_enum.h"
+#include "invoke_type.h"
+#include "mirror/class.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+// Initialize an intrinsic. Returns true if the intrinsic is already
+// initialized, false otherwise.
+bool InitializeIntrinsic(Thread* self,
+ Intrinsics intrinsic,
+ InvokeType invoke_type,
+ const char* class_name,
+ const char* method_name,
+ const char* signature)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ PointerSize image_size = class_linker->GetImagePointerSize();
+ ObjPtr<mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
+ if (cls == nullptr) {
+ LOG(FATAL) << "Could not find class of intrinsic " << class_name;
+ }
+
+ ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size);
+ if (method == nullptr || method->GetDeclaringClass() != cls) {
+ LOG(FATAL) << "Could not find method of intrinsic "
+ << class_name << " " << method_name << " " << signature;
+ }
+
+ CHECK_EQ(method->GetInvokeType(), invoke_type);
+ if (method->IsIntrinsic()) {
+ CHECK_EQ(method->GetIntrinsic(), static_cast<uint32_t>(intrinsic));
+ return true;
+ } else {
+ method->SetIntrinsic(static_cast<uint32_t>(intrinsic));
+ return false;
+ }
+}
+
+} // namespace
+
+void InitializeIntrinsics() {
+ ScopedObjectAccess soa(Thread::Current());
+ // Initialization here uses the short-circuit operator || to stop
+ // initializing if there's an already initialized intrinsic.
+#define SETUP_INTRINSICS(Name, InvokeType, _, __, ___, ClassName, MethodName, Signature) \
+ InitializeIntrinsic(soa.Self(), \
+ Intrinsics::k##Name, \
+ InvokeType, \
+ ClassName, \
+ MethodName, \
+ Signature) ||
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(SETUP_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef SETUP_INTRINSICS
+ true;
+}
+
+} // namespace art
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java b/runtime/runtime_intrinsics.h
index a25ee2869d..98dc9bc8c9 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java
+++ b/runtime/runtime_intrinsics.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,29 +14,13 @@
* limitations under the License.
*/
-package com.android.ahat.heapdump;
+#ifndef ART_RUNTIME_RUNTIME_INTRINSICS_H_
+#define ART_RUNTIME_RUNTIME_INTRINSICS_H_
-public class AhatField {
- private final String mName;
- private final String mType;
+namespace art {
- public AhatField(String name, String type) {
- mName = name;
- mType = type;
- }
+void InitializeIntrinsics();
- /**
- * Returns the name of the field.
- */
- public String getName() {
- return mName;
- }
-
- /**
- * Returns a description of the type of the field.
- */
- public String getType() {
- return mType;
- }
-}
+} // namespace art
+#endif // ART_RUNTIME_RUNTIME_INTRINSICS_H_
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index a1f14be0f1..bf5d718113 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -34,6 +34,7 @@
#endif
#include "arch/instruction_set.h"
+#include "base/file_utils.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
diff --git a/runtime/standard_dex_file.h b/runtime/standard_dex_file.h
index 1ec06edef3..784ab31821 100644
--- a/runtime/standard_dex_file.h
+++ b/runtime/standard_dex_file.h
@@ -28,6 +28,10 @@ class OatDexFile;
// Standard dex file. This is the format that is packaged in APKs and produced by tools.
class StandardDexFile : public DexFile {
public:
+ class Header : public DexFile::Header {
+ // Same for now.
+ };
+
static const uint8_t kDexMagic[kDexMagicSize];
static constexpr size_t kNumDexVersions = 4;
static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
@@ -40,13 +44,18 @@ class StandardDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ bool IsStandardDexFile() const OVERRIDE {
+ return true;
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file)
- : DexFile(base, size, location, location_checksum, oat_dex_file) {}
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
friend class DexFileLoader;
friend class DexFileVerifierTest;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2753bf71eb..712eabc888 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -39,6 +39,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
+#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/systrace.h"
@@ -70,8 +71,8 @@
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "native_stack_dump.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
@@ -154,7 +155,7 @@ void Thread::InitTlsEntryPoints() {
}
void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
- if (kUseReadBarrier && kRuntimeISA != kX86_64) {
+ if (kUseReadBarrier && kRuntimeISA != InstructionSet::kX86_64) {
// Allocation entrypoint switching is currently only implemented for X86_64.
is_marking = true;
}
@@ -1113,7 +1114,7 @@ bool Thread::InitStackHwm() {
// effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
// stack_begin to 0.
const bool valgrind_on_arm =
- (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
+ (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
kMemoryToolIsValgrind &&
RUNNING_ON_MEMORY_TOOL != 0;
if (valgrind_on_arm) {
@@ -1346,36 +1347,26 @@ void Thread::ClearSuspendBarrier(AtomicInteger* target) {
}
void Thread::RunCheckpointFunction() {
- bool done = false;
- do {
- // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
- // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
- // to prevent a race between setting the kCheckpointRequest flag and clearing it.
- Closure* checkpoint = nullptr;
- {
- MutexLock mu(this, *Locks::thread_suspend_count_lock_);
- if (tlsPtr_.checkpoint_function != nullptr) {
- checkpoint = tlsPtr_.checkpoint_function;
- if (!checkpoint_overflow_.empty()) {
- // Overflow list not empty, copy the first one out and continue.
- tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
- checkpoint_overflow_.pop_front();
- } else {
- // No overflow checkpoints, this means that we are on the last pending checkpoint.
- tlsPtr_.checkpoint_function = nullptr;
- AtomicClearFlag(kCheckpointRequest);
- done = true;
- }
- } else {
- LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
- }
+ // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If
+ // there are no more checkpoints we will also clear the kCheckpointRequest flag.
+ Closure* checkpoint;
+ {
+ MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+ checkpoint = tlsPtr_.checkpoint_function;
+ if (!checkpoint_overflow_.empty()) {
+ // Overflow list not empty, copy the first one out and continue.
+ tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
+ checkpoint_overflow_.pop_front();
+ } else {
+ // No overflow checkpoints. Clear the kCheckpointRequest flag
+ tlsPtr_.checkpoint_function = nullptr;
+ AtomicClearFlag(kCheckpointRequest);
}
-
- // Outside the lock, run the checkpoint functions that we collected.
- ScopedTrace trace("Run checkpoint function");
- DCHECK(checkpoint != nullptr);
- checkpoint->Run(this);
- } while (!done);
+ }
+ // Outside the lock, run the checkpoint function.
+ ScopedTrace trace("Run checkpoint function");
+ CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint";
+ checkpoint->Run(this);
}
void Thread::RunEmptyCheckpoint() {
@@ -1451,21 +1442,25 @@ class BarrierClosure : public Closure {
Barrier barrier_;
};
+// RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
bool Thread::RequestSynchronousCheckpoint(Closure* function) {
+ Thread* self = Thread::Current();
if (this == Thread::Current()) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
+ // Unlock the tll before running so that the state is the same regardless of thread.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
// Asked to run on this thread. Just run.
function->Run(this);
return true;
}
- Thread* self = Thread::Current();
// The current thread is not this thread.
if (GetState() == ThreadState::kTerminated) {
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
return false;
}
- // Note: we're holding the thread-list lock. The thread cannot die at this point.
struct ScopedThreadListLockUnlock {
explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
: self_thread(self_in) {
@@ -1482,6 +1477,7 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
};
for (;;) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
// If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
// suspend-count lock for too long.
if (GetState() == ThreadState::kRunnable) {
@@ -1492,8 +1488,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
installed = RequestCheckpoint(&barrier_closure);
}
if (installed) {
- // Relinquish the thread-list lock, temporarily. We should not wait holding any locks.
- ScopedThreadListLockUnlock stllu(self);
+ // Relinquish the thread-list lock. We should not wait holding any locks. We cannot
+ // reacquire it since we don't know if 'this' hasn't been deleted yet.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
barrier_closure.Wait(self);
return true;
@@ -1515,6 +1512,8 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
}
{
+ // Release for the wait. The suspension will keep us from being deleted. Reacquire after so
+ // that we can call ModifySuspendCount without racing against ThreadList::Unregister.
ScopedThreadListLockUnlock stllu(self);
{
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
@@ -1543,6 +1542,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
Thread::resume_cond_->Broadcast(self);
}
+ // Release the thread_list_lock_ to be consistent with the barrier-closure path.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+
return true; // We're done, break out of the loop.
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index ab89778bf9..3b917bab9b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -265,9 +265,13 @@ class Thread {
bool RequestCheckpoint(Closure* function)
REQUIRES(Locks::thread_suspend_count_lock_);
+
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
+ // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
+ // execute the checkpoint for us if it is Runnable.
bool RequestSynchronousCheckpoint(Closure* function)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(Locks::thread_list_lock_)
+ RELEASE(Locks::thread_list_lock_)
REQUIRES(!Locks::thread_suspend_count_lock_);
bool RequestEmptyCheckpoint()
REQUIRES(Locks::thread_suspend_count_lock_);
@@ -1352,6 +1356,9 @@ class Thread {
WARN_UNUSED
REQUIRES(Locks::thread_suspend_count_lock_);
+ // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
+ // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
+ // the kCheckpointRequest flag is cleared.
void RunCheckpointFunction();
void RunEmptyCheckpoint();
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 488e4a6517..88f1fc6991 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -21,11 +21,12 @@
#include <unistd.h>
#include <sstream>
+#include <vector>
#include "android-base/stringprintf.h"
#include "backtrace/BacktraceMap.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
@@ -204,7 +205,11 @@ class DumpCheckpoint FINAL : public Closure {
: os_(os),
barrier_(0),
backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
- dump_native_stack_(dump_native_stack) {}
+ dump_native_stack_(dump_native_stack) {
+ if (backtrace_map_ != nullptr) {
+ backtrace_map_->SetSuffixesToIgnore(std::vector<std::string> { "oat", "odex" });
+ }
+ }
void Run(Thread* thread) OVERRIDE {
// Note thread and self may not be equal if thread was already suspended at the point of the
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4c3fa20c9d..4b5a7610a3 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -39,7 +39,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 1f6bd742b6..f6533a7130 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -18,41 +18,20 @@
#include <inttypes.h>
#include <pthread.h>
-#include <sys/mman.h> // For madvise
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
-// We need dladdr.
-#ifndef __APPLE__
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#define DEFINED_GNU_SOURCE
-#endif
-#include <dlfcn.h>
-#include <libgen.h>
-#ifdef DEFINED_GNU_SOURCE
-#undef _GNU_SOURCE
-#undef DEFINED_GNU_SOURCE
-#endif
-#endif
-
-
#include <memory>
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
+#include "base/file_utils.h"
#include "dex_file-inl.h"
-#include "dex_file_loader.h"
-#include "dex_instruction.h"
-#include "oat_quick_method_header.h"
#include "os.h"
-#include "scoped_thread_state_change-inl.h"
#include "utf-inl.h"
#if defined(__APPLE__)
@@ -92,78 +71,6 @@ std::string GetThreadName(pid_t tid) {
return result;
}
-bool ReadFileToString(const std::string& file_name, std::string* result) {
- File file(file_name, O_RDONLY, false);
- if (!file.IsOpened()) {
- return false;
- }
-
- std::vector<char> buf(8 * KB);
- while (true) {
- int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
- if (n == -1) {
- return false;
- }
- if (n == 0) {
- return true;
- }
- result->append(&buf[0], n);
- }
-}
-
-bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
- File file(file_name, O_RDONLY, false);
- if (!file.IsOpened()) {
- return false;
- }
-
- constexpr size_t kBufSize = 256; // Small buffer. Avoid stack overflow and stack size warnings.
- char buf[kBufSize + 1]; // +1 for terminator.
- size_t filled_to = 0;
- while (true) {
- DCHECK_LT(filled_to, kBufSize);
- int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[filled_to], kBufSize - filled_to));
- if (n <= 0) {
- // Print the rest of the buffer, if it exists.
- if (filled_to > 0) {
- buf[filled_to] = 0;
- LOG(level) << buf;
- }
- return n == 0;
- }
- // Scan for '\n'.
- size_t i = filled_to;
- bool found_newline = false;
- for (; i < filled_to + n; ++i) {
- if (buf[i] == '\n') {
- // Found a line break, that's something to print now.
- buf[i] = 0;
- LOG(level) << buf;
- // Copy the rest to the front.
- if (i + 1 < filled_to + n) {
- memmove(&buf[0], &buf[i + 1], filled_to + n - i - 1);
- filled_to = filled_to + n - i - 1;
- } else {
- filled_to = 0;
- }
- found_newline = true;
- break;
- }
- }
- if (found_newline) {
- continue;
- } else {
- filled_to += n;
- // Check if we must flush now.
- if (filled_to == kBufSize) {
- buf[kBufSize] = 0;
- LOG(level) << buf;
- filled_to = 0;
- }
- }
- }
-}
-
void AppendPrettyDescriptor(const char* descriptor, std::string* result) {
// Count the number of '['s to get the dimensionality.
const char* c = descriptor;
@@ -718,197 +625,6 @@ void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu)
*task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
-std::string GetAndroidRootSafe(std::string* error_msg) {
- // Prefer ANDROID_ROOT if it's set.
- const char* android_dir = getenv("ANDROID_ROOT");
- if (android_dir != nullptr) {
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_dir);
- return "";
- }
- return android_dir;
- }
-
- // Check where libart is from, and derive from there. Only do this for non-Mac.
-#ifndef __APPLE__
- {
- Dl_info info;
- if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
- // Make a duplicate of the fname so dirname can modify it.
- UniqueCPtr<char> fname(strdup(info.dli_fname));
-
- char* dir1 = dirname(fname.get()); // This is the lib directory.
- char* dir2 = dirname(dir1); // This is the "system" directory.
- if (OS::DirectoryExists(dir2)) {
- std::string tmp = dir2; // Make a copy here so that fname can be released.
- return tmp;
- }
- }
- }
-#endif
-
- // Try "/system".
- if (!OS::DirectoryExists("/system")) {
- *error_msg = "Failed to find ANDROID_ROOT directory /system";
- return "";
- }
- return "/system";
-}
-
-std::string GetAndroidRoot() {
- std::string error_msg;
- std::string ret = GetAndroidRootSafe(&error_msg);
- if (ret.empty()) {
- LOG(FATAL) << error_msg;
- UNREACHABLE();
- }
- return ret;
-}
-
-
-static const char* GetAndroidDirSafe(const char* env_var,
- const char* default_dir,
- std::string* error_msg) {
- const char* android_dir = getenv(env_var);
- if (android_dir == nullptr) {
- if (OS::DirectoryExists(default_dir)) {
- android_dir = default_dir;
- } else {
- *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
- return nullptr;
- }
- }
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
- return nullptr;
- }
- return android_dir;
-}
-
-static const char* GetAndroidDir(const char* env_var, const char* default_dir) {
- std::string error_msg;
- const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
- if (dir != nullptr) {
- return dir;
- } else {
- LOG(FATAL) << error_msg;
- return nullptr;
- }
-}
-
-const char* GetAndroidData() {
- return GetAndroidDir("ANDROID_DATA", "/data");
-}
-
-const char* GetAndroidDataSafe(std::string* error_msg) {
- return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
-}
-
-std::string GetDefaultBootImageLocation(std::string* error_msg) {
- std::string android_root = GetAndroidRootSafe(error_msg);
- if (android_root.empty()) {
- return "";
- }
- return StringPrintf("%s/framework/boot.art", android_root.c_str());
-}
-
-void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
- CHECK(subdir != nullptr);
- std::string error_msg;
- const char* android_data = GetAndroidDataSafe(&error_msg);
- if (android_data == nullptr) {
- *have_android_data = false;
- *dalvik_cache_exists = false;
- *is_global_cache = false;
- return;
- } else {
- *have_android_data = true;
- }
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- *dalvik_cache = dalvik_cache_root + subdir;
- *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
- *is_global_cache = strcmp(android_data, "/data") == 0;
- if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
- // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
- *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
- (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
- }
-}
-
-std::string GetDalvikCache(const char* subdir) {
- CHECK(subdir != nullptr);
- const char* android_data = GetAndroidData();
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- const std::string dalvik_cache = dalvik_cache_root + subdir;
- if (!OS::DirectoryExists(dalvik_cache.c_str())) {
- // TODO: Check callers. Traditional behavior is to not abort.
- return "";
- }
- return dalvik_cache;
-}
-
-bool GetDalvikCacheFilename(const char* location, const char* cache_location,
- std::string* filename, std::string* error_msg) {
- if (location[0] != '/') {
- *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
- return false;
- }
- std::string cache_file(&location[1]); // skip leading slash
- if (!android::base::EndsWith(location, ".dex") &&
- !android::base::EndsWith(location, ".art") &&
- !android::base::EndsWith(location, ".oat")) {
- cache_file += "/";
- cache_file += DexFileLoader::kClassesDex;
- }
- std::replace(cache_file.begin(), cache_file.end(), '/', '@');
- *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
- return true;
-}
-
-std::string GetVdexFilename(const std::string& oat_location) {
- return ReplaceFileExtension(oat_location, "vdex");
-}
-
-static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
- // in = /foo/bar/baz
- // out = /foo/bar/<isa>/baz
- size_t pos = filename->rfind('/');
- CHECK_NE(pos, std::string::npos) << *filename << " " << isa;
- filename->insert(pos, "/", 1);
- filename->insert(pos + 1, GetInstructionSetString(isa));
-}
-
-std::string GetSystemImageFilename(const char* location, const InstructionSet isa) {
- // location = /system/framework/boot.art
- // filename = /system/framework/<isa>/boot.art
- std::string filename(location);
- InsertIsaDirectory(isa, &filename);
- return filename;
-}
-
-bool FileExists(const std::string& filename) {
- struct stat buffer;
- return stat(filename.c_str(), &buffer) == 0;
-}
-
-bool FileExistsAndNotEmpty(const std::string& filename) {
- struct stat buffer;
- if (stat(filename.c_str(), &buffer) != 0) {
- return false;
- }
- return buffer.st_size > 0;
-}
-
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
- const size_t last_ext = filename.find_last_of('.');
- if (last_ext == std::string::npos) {
- return filename + "." + new_extension;
- } else {
- return filename.substr(0, last_ext + 1) + new_extension;
- }
-}
-
std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
@@ -952,30 +668,10 @@ void ParseDouble(const std::string& option,
*parsed_value = value;
}
-int64_t GetFileSizeBytes(const std::string& filename) {
- struct stat stat_buf;
- int rc = stat(filename.c_str(), &stat_buf);
- return rc == 0 ? stat_buf.st_size : -1;
-}
-
void SleepForever() {
while (true) {
usleep(1000000);
}
}
-int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice) {
- DCHECK_LE(begin, end);
- begin = AlignUp(begin, kPageSize);
- end = AlignDown(end, kPageSize);
- if (begin < end) {
- int result = madvise(const_cast<uint8_t*>(begin), end - begin, advice);
- if (result != 0) {
- PLOG(WARNING) << "madvise failed " << result;
- }
- return result;
- }
- return 0;
-}
-
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index fbf812a6b3..ede32dc57a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -20,12 +20,8 @@
#include <pthread.h>
#include <stdlib.h>
-#include <limits>
-#include <memory>
#include <random>
#include <string>
-#include <type_traits>
-#include <vector>
#include "arch/instruction_set.h"
#include "base/casts.h"
@@ -118,9 +114,6 @@ bool IsValidDescriptor(const char* s); // "Ljava/lang/String;"
// additionally allowing names that begin with '<' and end with '>'.
bool IsValidMemberName(const char* s);
-bool ReadFileToString(const std::string& file_name, std::string* result);
-bool PrintFileToLog(const std::string& file_name, LogSeverity level);
-
// Splits a string using the given separator character into a vector of
// strings. Empty strings will be omitted.
void Split(const std::string& s, char separator, std::vector<std::string>* result);
@@ -131,58 +124,12 @@ pid_t GetTid();
// Returns the given thread's name.
std::string GetThreadName(pid_t tid);
-// Reads data from "/proc/self/task/${tid}/stat".
-void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu);
-
// Sets the name of the current thread. The name may be truncated to an
// implementation-defined limit.
void SetThreadName(const char* thread_name);
-// Find $ANDROID_ROOT, /system, or abort.
-std::string GetAndroidRoot();
-// Find $ANDROID_ROOT, /system, or return an empty string.
-std::string GetAndroidRootSafe(std::string* error_msg);
-
-// Find $ANDROID_DATA, /data, or abort.
-const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return null.
-const char* GetAndroidDataSafe(std::string* error_msg);
-
-// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
-// Returns an empty string if ANDROID_ROOT is not set.
-std::string GetDefaultBootImageLocation(std::string* error_msg);
-
-// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
-// could not be found.
-std::string GetDalvikCache(const char* subdir);
-// Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
-// have_android_data will be set to true if we have an ANDROID_DATA that exists,
-// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
-// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
-void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
-
-// Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
-// rooted at cache_location.
-bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
- std::string* filename, std::string* error_msg);
-
-// Returns the system location for an image
-std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-
-// Returns the vdex filename for the given oat filename.
-std::string GetVdexFilename(const std::string& oat_filename);
-
-// Returns true if the file exists.
-bool FileExists(const std::string& filename);
-bool FileExistsAndNotEmpty(const std::string& filename);
-
-// Returns `filename` with the text after the last occurrence of '.' replaced with
-// `extension`. If `filename` does not contain a period, returns a string containing `filename`,
-// a period, and `new_extension`.
-// Example: ReplaceFileExtension("foo.bar", "abc") == "foo.abc"
-// ReplaceFileExtension("foo", "abc") == "foo.abc"
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
+// Reads data from "/proc/self/task/${tid}/stat".
+void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu);
class VoidFunctor {
public:
@@ -273,9 +220,6 @@ static T GetRandomNumber(T min, T max) {
return dist(rng);
}
-// Return the file size in bytes or -1 if the file does not exists.
-int64_t GetFileSizeBytes(const std::string& filename);
-
// Sleep forever and never come back.
NO_RETURN void SleepForever();
@@ -335,9 +279,6 @@ inline static int32_t Signum(T opnd) {
return (opnd < 0) ? -1 : ((opnd == 0) ? 0 : 1);
}
-// Madvise the largest page aligned region within begin and end.
-int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
-
template <typename Func, typename... Args>
static inline void CheckedCall(const Func& function, const char* what, Args... args) {
int rc = function(args...);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index e846c983af..1dc46871ac 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -20,6 +20,7 @@
#include <stdlib.h>
#include "base/enums.h"
+#include "base/file_utils.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
@@ -291,7 +292,7 @@ TEST_F(UtilsTest, GetDalvikCache) {
TEST_F(UtilsTest, GetSystemImageFilename) {
EXPECT_STREQ("/system/framework/arm/boot.art",
- GetSystemImageFilename("/system/framework/boot.art", kArm).c_str());
+ GetSystemImageFilename("/system/framework/boot.art", InstructionSet::kArm).c_str());
}
TEST_F(UtilsTest, ExecSuccess) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 0033167160..0f6244e125 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -553,7 +553,7 @@ MethodVerifier::MethodVerifier(Thread* self,
: self_(self),
arena_stack_(Runtime::Current()->GetArenaPool()),
allocator_(&arena_stack_),
- reg_types_(can_load_classes, allocator_),
+ reg_types_(can_load_classes, allocator_, allow_thread_suspension),
reg_table_(allocator_),
work_insn_idx_(dex::kDexNoIndex),
dex_method_idx_(dex_method_idx),
@@ -617,8 +617,8 @@ void MethodVerifier::FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
}
static bool HasMonitorEnterInstructions(const DexFile::CodeItem* const code_item) {
- for (const Instruction& inst : code_item->Instructions()) {
- if (inst.Opcode() == Instruction::MONITOR_ENTER) {
+ for (const DexInstructionPcPair& inst : code_item->Instructions()) {
+ if (inst->Opcode() == Instruction::MONITOR_ENTER) {
return true;
}
}
@@ -987,9 +987,17 @@ bool MethodVerifier::ComputeWidthsAndCountOps() {
size_t monitor_enter_count = 0;
IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- DexInstructionIterator inst = instructions.begin();
- for ( ; inst < instructions.end(); ++inst) {
- Instruction::Code opcode = inst->Opcode();
+ // We can't assume the instruction is well formed, handle the case where calculating the size
+ // goes past the end of the code item.
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for ( ; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ SafeDexInstructionIterator next = it;
+ ++next;
+ if (next.IsErrorState() || next > instructions.end()) {
+ break;
+ }
+ Instruction::Code opcode = it->Opcode();
switch (opcode) {
case Instruction::APUT_OBJECT:
case Instruction::CHECK_CAST:
@@ -1010,14 +1018,13 @@ bool MethodVerifier::ComputeWidthsAndCountOps() {
default:
break;
}
- GetInstructionFlags(inst.GetDexPC(instructions.begin())).SetIsOpcode();
+ GetInstructionFlags(it.DexPc()).SetIsOpcode();
}
- if (inst != instructions.end()) {
+ if (it != instructions.end()) {
const size_t insns_size = code_item_->insns_size_in_code_units_;
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "code did not end where expected ("
- << inst.GetDexPC(instructions.begin()) << " vs. "
- << insns_size << ")";
+ << it.DexPc() << " vs. " << insns_size << ")";
return false;
}
@@ -1098,10 +1105,9 @@ bool MethodVerifier::VerifyInstructions() {
/* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
GetInstructionFlags(0).SetBranchTarget();
GetInstructionFlags(0).SetCompileTimeInfoPoint();
- IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- for (auto inst = instructions.begin(); inst != instructions.end(); ++inst) {
- const uint32_t dex_pc = inst.GetDexPC(instructions.begin());
- if (!VerifyInstruction<kAllowRuntimeOnlyInstructions>(&*inst, dex_pc)) {
+ for (const DexInstructionPcPair& inst : code_item_->Instructions()) {
+ const uint32_t dex_pc = inst.DexPc();
+ if (!VerifyInstruction<kAllowRuntimeOnlyInstructions>(&inst.Inst(), dex_pc)) {
DCHECK_NE(failures_.size(), 0U);
return false;
}
@@ -1687,9 +1693,8 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
vios->Stream() << "Dumping instructions and register lines:\n";
ScopedIndentation indent1(vios);
- IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- for (auto inst = instructions.begin(); inst != instructions.end(); ++inst) {
- const size_t dex_pc = inst.GetDexPC(instructions.begin());
+ for (const DexInstructionPcPair& inst : code_item_->Instructions()) {
+ const size_t dex_pc = inst.DexPc();
RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
if (reg_line != nullptr) {
vios->Stream() << reg_line->Dump(this) << "\n";
@@ -1955,9 +1960,8 @@ bool MethodVerifier::CodeFlowVerifyMethod() {
*/
int dead_start = -1;
- IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
- for (auto inst = instructions.begin(); inst != instructions.end(); ++inst) {
- const uint32_t insn_idx = inst.GetDexPC(instructions.begin());
+ for (const DexInstructionPcPair& inst : code_item_->Instructions()) {
+ const uint32_t insn_idx = inst.DexPc();
/*
* Switch-statement data doesn't get "visited" by scanner. It
* may or may not be preceded by a padding NOP (for alignment).
@@ -1985,7 +1989,7 @@ bool MethodVerifier::CodeFlowVerifyMethod() {
if (dead_start >= 0) {
LogVerifyInfo()
<< "dead code " << reinterpret_cast<void*>(dead_start)
- << "-" << reinterpret_cast<void*>(instructions.end().GetDexPC(instructions.begin()) - 1);
+ << "-" << reinterpret_cast<void*>(code_item_->insns_size_in_code_units_ - 1);
}
// To dump the state of the verify after a method, do something like:
// if (dex_file_->PrettyMethod(dex_method_idx_) ==
@@ -5031,7 +5035,7 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType&
}
ObjPtr<mirror::Class> field_type_class =
- can_load_classes_ ? field->GetType<true>() : field->GetType<false>();
+ can_load_classes_ ? field->ResolveType() : field->LookupType();
if (field_type_class != nullptr) {
field_type = &FromClass(field->GetTypeDescriptor(),
field_type_class.Ptr(),
@@ -5179,8 +5183,8 @@ void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegTy
// Get the field type.
const RegType* field_type;
{
- ObjPtr<mirror::Class> field_type_class = can_load_classes_ ? field->GetType<true>() :
- field->GetType<false>();
+ ObjPtr<mirror::Class> field_type_class =
+ can_load_classes_ ? field->ResolveType() : field->LookupType();
if (field_type_class != nullptr) {
field_type = &FromClass(field->GetTypeDescriptor(),
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 4ebe151f76..0029eb90a3 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -268,12 +268,13 @@ const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* kl
return *reg_type;
}
-RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator)
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator, bool can_suspend)
: entries_(allocator.Adapter(kArenaAllocVerifier)),
klass_entries_(allocator.Adapter(kArenaAllocVerifier)),
can_load_classes_(can_load_classes),
allocator_(allocator) {
- if (kIsDebugBuild) {
+ DCHECK(can_suspend || !can_load_classes) << "Cannot load classes if suspension is disabled!";
+ if (kIsDebugBuild && can_suspend) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
// The klass_entries_ array does not have primitives or small constants.
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 74d9e9de11..cb16b15054 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -61,7 +61,7 @@ static constexpr size_t kDefaultArenaBitVectorBytes = 8;
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator);
+ RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator, bool can_suspend = true);
~RegTypeCache();
static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 74d11a32de..9722db9641 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -27,7 +27,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -52,6 +52,7 @@ jclass WellKnownClasses::java_lang_ClassNotFoundException;
jclass WellKnownClasses::java_lang_Daemons;
jclass WellKnownClasses::java_lang_Error;
jclass WellKnownClasses::java_lang_invoke_MethodHandle;
+jclass WellKnownClasses::java_lang_invoke_VarHandle;
jclass WellKnownClasses::java_lang_IllegalAccessError;
jclass WellKnownClasses::java_lang_NoClassDefFoundError;
jclass WellKnownClasses::java_lang_Object;
@@ -298,6 +299,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Error = CacheClass(env, "java/lang/Error");
java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError");
java_lang_invoke_MethodHandle = CacheClass(env, "java/lang/invoke/MethodHandle");
+ java_lang_invoke_VarHandle = CacheClass(env, "java/lang/invoke/VarHandle");
java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError");
java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor");
java_lang_reflect_Executable = CacheClass(env, "java/lang/reflect/Executable");
@@ -334,6 +336,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_invoke_MethodHandle_invokeExact = CacheMethod(env, java_lang_invoke_MethodHandle, false, "invokeExact", "([Ljava/lang/Object;)Ljava/lang/Object;");
java_lang_invoke_MethodHandles_lookup = CacheMethod(env, "java/lang/invoke/MethodHandles", true, "lookup", "()Ljava/lang/invoke/MethodHandles$Lookup;");
java_lang_invoke_MethodHandles_Lookup_findConstructor = CacheMethod(env, "java/lang/invoke/MethodHandles$Lookup", false, "findConstructor", "(Ljava/lang/Class;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;");
+
java_lang_ref_FinalizerReference_add = CacheMethod(env, "java/lang/ref/FinalizerReference", true, "add", "(Ljava/lang/Object;)V");
java_lang_ref_ReferenceQueue_add = CacheMethod(env, "java/lang/ref/ReferenceQueue", true, "add", "(Ljava/lang/ref/Reference;)V");
@@ -434,6 +437,7 @@ void WellKnownClasses::Clear() {
java_lang_Error = nullptr;
java_lang_IllegalAccessError = nullptr;
java_lang_invoke_MethodHandle = nullptr;
+ java_lang_invoke_VarHandle = nullptr;
java_lang_NoClassDefFoundError = nullptr;
java_lang_Object = nullptr;
java_lang_OutOfMemoryError = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 7deef636b1..3ebcc33171 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -64,6 +64,7 @@ struct WellKnownClasses {
static jclass java_lang_Error;
static jclass java_lang_IllegalAccessError;
static jclass java_lang_invoke_MethodHandle;
+ static jclass java_lang_invoke_VarHandle;
static jclass java_lang_NoClassDefFoundError;
static jclass java_lang_Object;
static jclass java_lang_OutOfMemoryError;
diff --git a/simulator/code_simulator.cc b/simulator/code_simulator.cc
index e653dfc4fe..c04ab1c5b6 100644
--- a/simulator/code_simulator.cc
+++ b/simulator/code_simulator.cc
@@ -22,7 +22,7 @@ namespace art {
CodeSimulator* CodeSimulator::CreateCodeSimulator(InstructionSet target_isa) {
switch (target_isa) {
- case kArm64:
+ case InstructionSet::kArm64:
return arm64::CodeSimulatorArm64::CreateCodeSimulatorArm64();
default:
return nullptr;
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 0542593eb2..8b665292af 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -25,6 +25,7 @@
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop
+#include "arch/instruction_set.h"
#include "code_simulator.h"
namespace art {
@@ -48,7 +49,7 @@ class CodeSimulatorArm64 : public CodeSimulator {
vixl::aarch64::Simulator* simulator_;
// TODO: Enable CodeSimulatorArm64 for more host ISAs once Simulator supports them.
- static constexpr bool kCanSimulate = (kRuntimeISA == kX86_64);
+ static constexpr bool kCanSimulate = (kRuntimeISA == InstructionSet::kX86_64);
DISALLOW_COPY_AND_ASSIGN(CodeSimulatorArm64);
};
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index 63a46200d9..eed7b7ea6d 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -97,3 +97,4 @@ JNI_OnLoad called
callback
Found constructor.
Found constructors with 0 exceptions
+Received OOME
diff --git a/test/044-proxy/run b/test/044-proxy/run
new file mode 100644
index 0000000000..4a322f3323
--- /dev/null
+++ b/test/044-proxy/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a smaller heap so it's easier to fill up.
+exec ${RUN} "$@" --runtime-option -Xmx4m
diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java
index 9dadb7c6ea..e44c122e3d 100644
--- a/test/044-proxy/src/Main.java
+++ b/test/044-proxy/src/Main.java
@@ -32,6 +32,7 @@ public class Main {
FloatSelect.main(null);
NativeProxy.main(args);
ConstructorProxy.main();
+ OOMEOnDispatch.main(args);
}
// The following code maps from the actual proxy class names (eg $Proxy2) to their test output
diff --git a/test/044-proxy/src/OOMEOnDispatch.java b/test/044-proxy/src/OOMEOnDispatch.java
new file mode 100644
index 0000000000..94f267980d
--- /dev/null
+++ b/test/044-proxy/src/OOMEOnDispatch.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.ArrayList;
+
+/**
+ * Ensure that one can dispatch without aborting when the heap is full.
+ */
+public class OOMEOnDispatch implements InvocationHandler {
+
+ static ArrayList<Object> storage = new ArrayList<>(100000);
+
+ public static void main(String[] args) {
+ InvocationHandler handler = new OOMEOnDispatch();
+ OOMEInterface inf = (OOMEInterface)Proxy.newProxyInstance(
+ OOMEInterface.class.getClassLoader(), new Class[] { OOMEInterface.class },
+ handler);
+
+ int l = 1024 * 1024;
+ while (l > 8) {
+ try {
+ storage.add(new byte[l]);
+ } catch (OutOfMemoryError e) {
+ l = l/2;
+ }
+ }
+ // Have an extra run with the exact size of Method objects. The above loop should have
+ // filled with enough large objects for simplicity and speed, but ensure exact allocation
+ // size.
+ final int methodAsByteArrayLength = 40 - 12; // Method size - byte array overhead.
+ for (;;) {
+ try {
+ storage.add(new byte[methodAsByteArrayLength]);
+ } catch (OutOfMemoryError e) {
+ break;
+ }
+ }
+
+ try {
+ inf.foo();
+ storage.clear();
+ System.out.println("Did not receive OOME!");
+ } catch (OutOfMemoryError oome) {
+ storage.clear();
+ System.out.println("Received OOME");
+ }
+ }
+
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ storage.clear();
+ System.out.println("Should not have reached OOMEOnDispatch.invoke!");
+ return null;
+ }
+}
+
+interface OOMEInterface {
+ public void foo();
+}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index ad705c56d2..58b33be573 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -28,6 +28,7 @@
#include <backtrace/Backtrace.h>
#include "android-base/stringprintf.h"
+#include "base/file_utils.h"
#include "base/logging.h"
#include "base/macros.h"
#include "gc/heap.h"
diff --git a/test/203-multi-checkpoint/expected.txt b/test/203-multi-checkpoint/expected.txt
new file mode 100644
index 0000000000..e1e30e3f1d
--- /dev/null
+++ b/test/203-multi-checkpoint/expected.txt
@@ -0,0 +1,5 @@
+JNI_OnLoad called
+Other thread running
+pushing checkpoints
+checkpoints pushed
+Passed!
diff --git a/test/203-multi-checkpoint/info.txt b/test/203-multi-checkpoint/info.txt
new file mode 100644
index 0000000000..a96ba97d2a
--- /dev/null
+++ b/test/203-multi-checkpoint/info.txt
@@ -0,0 +1,4 @@
+Test that we correctly handle checkpoints that suspend.
+
+This could cause problems with asserts when there were multiple checkpoints
+queued and earlier ones suspended.
diff --git a/test/203-multi-checkpoint/multi_checkpoint.cc b/test/203-multi-checkpoint/multi_checkpoint.cc
new file mode 100644
index 0000000000..0799b6ed2d
--- /dev/null
+++ b/test/203-multi-checkpoint/multi_checkpoint.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method-inl.h"
+#include "base/mutex-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_pool.h"
+
+namespace art {
+
+struct TestClosure : public Closure {
+ bool first_run_start;
+ bool first_run_end;
+ bool second_run;
+ bool second_run_interleaved;
+
+ void Run(Thread* self) OVERRIDE {
+ CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
+ if (!first_run_start) {
+ CHECK(!second_run);
+ first_run_start = true;
+ // Suspend ourself so that we will perform the second run.
+ {
+ ScopedObjectAccess soa(self);
+ self->FullSuspendCheck();
+ }
+ first_run_end = true;
+ } else {
+ CHECK(!second_run);
+ CHECK(first_run_start);
+ second_run = true;
+ second_run_interleaved = !first_run_end;
+ }
+ }
+
+ void Check() {
+ CHECK(first_run_start);
+ CHECK(first_run_end);
+ CHECK(second_run);
+ CHECK(second_run_interleaved);
+ }
+};
+
+static TestClosure gTestClosure = {};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_checkCheckpointsRun(JNIEnv*, jclass) {
+ gTestClosure.Check();
+}
+
+struct SetupClosure : public Closure {
+ void Run(Thread* self) OVERRIDE {
+ CHECK_EQ(self, Thread::Current()) << "Not running on target thread!";
+ ScopedObjectAccess soa(self);
+ MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
+ // Both should succeed since we are in runnable and have the lock.
+ CHECK(self->RequestCheckpoint(&gTestClosure)) << "Could not set first checkpoint.";
+ CHECK(self->RequestCheckpoint(&gTestClosure)) << "Could not set second checkpoint.";
+ }
+};
+
+static SetupClosure gSetupClosure = {};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_pushCheckpoints(JNIEnv*, jclass, jobject thr) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ MutexLock tll_mu(self, *Locks::thread_list_lock_);
+ Thread* target = Thread::FromManagedThread(soa, thr);
+ while (true) {
+ MutexLock tscl_mu(self, *Locks::thread_suspend_count_lock_);
+ if (target->RequestCheckpoint(&gSetupClosure)) {
+ break;
+ }
+ }
+}
+
+} // namespace art
diff --git a/test/203-multi-checkpoint/src/Main.java b/test/203-multi-checkpoint/src/Main.java
new file mode 100644
index 0000000000..187f622730
--- /dev/null
+++ b/test/203-multi-checkpoint/src/Main.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.Semaphore;
+
+public class Main {
+ static final Semaphore start = new Semaphore(0);
+ static volatile boolean continue_loop = true;
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Thread t = new Thread(Main::runTargetThread, "Target Thread");
+
+ t.start();
+ // Wait for other thread to start.
+ start.acquire();
+
+ System.out.println("pushing checkpoints");
+ pushCheckpoints(t);
+
+ System.out.println("checkpoints pushed");
+ continue_loop = false;
+
+ t.join();
+
+ checkCheckpointsRun();
+
+ System.out.println("Passed!");
+ }
+
+ public static native void pushCheckpoints(Thread t);
+ public static native void checkCheckpointsRun();
+
+ public static void doNothing() {}
+ public static void runTargetThread() {
+ System.out.println("Other thread running");
+ try {
+ start.release();
+ while (continue_loop) {
+ doNothing();
+ }
+ } catch (Exception e) {
+ throw new Error("Exception occurred!", e);
+ }
+ }
+}
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 262d2c1983..7797f31867 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -2529,15 +2529,79 @@ public class Main {
/// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Cst1ffff>>]
/// CHECK-DAG: Return [<<And>>]
- // TODO: Simplify this. The And is useless.
-
- // CHECK-START: int Main.$noinline$getInstanceCharFieldAnd0x1ffff(Main) instruction_simplifier (after)
- // CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
- // CHECK-DAG: Return [<<Get>>]
+ /// CHECK-START: int Main.$noinline$getInstanceCharFieldAnd0x1ffff(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
public static int $noinline$getInstanceCharFieldAnd0x1ffff(Main m) {
return m.instanceCharField & 0x1ffff;
}
+ /// CHECK-START: int Main.$noinline$bug68142795Byte(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Const>>]
+ /// CHECK-DAG: <<And2:i\d+>> And [<<And1>>,<<Const>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Byte(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Byte(byte b) {
+ return (byte)(0xff & (b & 0xff));
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Short(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Const>>]
+ /// CHECK-DAG: <<And2:i\d+>> And [<<And1>>,<<Const>>]
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Short(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Short(short s) {
+ return (short)(0xffff & (s & 0xffff));
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Boolean(boolean) instruction_simplifier$after_inlining (before)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Select:i\d+>> Select [<<Const0>>,<<Const1>>,<<Arg>>]
+ /// CHECK-DAG: <<And:i\d+>> And [<<Const255>>,<<Select>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Boolean(boolean) instruction_simplifier$after_inlining (after)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Boolean(boolean b) {
+ int v = b ? 1 : 0; // Should be simplified to "b" after inlining.
+ return (byte)($inline$get255() & v);
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Elaborate(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Int255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Long255:j\d+>> LongConstant 255
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Int255>>]
+ /// CHECK-DAG: <<Conv1:j\d+>> TypeConversion [<<And1>>]
+ /// CHECK-DAG: <<And2:j\d+>> And [<<Conv1>>,<<Long255>>]
+ /// CHECK-DAG: <<Conv2:i\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: <<Conv3:b\d+>> TypeConversion [<<Conv2>>]
+ /// CHECK-DAG: Return [<<Conv3>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Elaborate(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Elaborate(byte b) {
+ return (byte)((int)(((long)(b & 0xff)) & 255L));
+ }
+
public static void main(String[] args) {
int arg = 123456;
float floatArg = 123456.125f;
@@ -2772,10 +2836,20 @@ public class Main {
m.instanceCharField = 'x';
assertIntEquals('x', $noinline$getInstanceCharFieldAnd0x1ffff(m));
+
+ assertIntEquals(0x7f, $noinline$bug68142795Byte((byte) 0x7f));
+ assertIntEquals((byte) 0x80, $noinline$bug68142795Byte((byte) 0x80));
+ assertIntEquals(0x7fff, $noinline$bug68142795Short((short) 0x7fff));
+ assertIntEquals((short) 0x8000, $noinline$bug68142795Short((short) 0x8000));
+ assertIntEquals(0, $noinline$bug68142795Boolean(false));
+ assertIntEquals(1, $noinline$bug68142795Boolean(true));
+ assertIntEquals(0x7f, $noinline$bug68142795Elaborate((byte) 0x7f));
+ assertIntEquals((byte) 0x80, $noinline$bug68142795Elaborate((byte) 0x80));
}
private static boolean $inline$true() { return true; }
private static boolean $inline$false() { return false; }
+ private static int $inline$get255() { return 255; }
public static boolean booleanField;
diff --git a/test/484-checker-register-hints/smali/Smali.smali b/test/484-checker-register-hints/smali/Smali.smali
new file mode 100644
index 0000000000..659493611f
--- /dev/null
+++ b/test/484-checker-register-hints/smali/Smali.smali
@@ -0,0 +1,143 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+.class public LSmali;
+.super Ljava/lang/Object;
+
+## CHECK-START: void Smali.test3Order1(boolean, int, int, int, int, int) register (after)
+## CHECK: name "B0"
+## CHECK-NOT: ParallelMove
+## CHECK: name "B1"
+## CHECK-NOT: end_block
+## CHECK: If
+## CHECK-NOT: ParallelMove
+## CHECK: name "B6"
+## CHECK-NOT: end_block
+## CHECK: InstanceFieldSet
+# We could check here that there is a parallel move, but it's only valid
+# for some architectures (for example x86), as other architectures may
+# not do move at all.
+## CHECK: end_block
+## CHECK-NOT: ParallelMove
+.method public static test3Order1(ZIIIII)V
+ .registers 14
+
+ sget v0, LMain;->live1:I
+ sget v1, LMain;->live2:I
+ sget v2, LMain;->live3:I
+ sget v5, LMain;->live0:I
+ if-eqz p0, :cond_13
+
+ sput v0, LMain;->live1:I
+
+ :goto_c
+ add-int v6, v0, v1
+ add-int/2addr v6, v2
+ add-int/2addr v6, v5
+ sput v6, LMain;->live1:I
+
+ return-void
+
+ :cond_13
+ sget-boolean v6, LMain;->y:Z
+
+ if-eqz v6, :cond_1a
+ sput v0, LMain;->live1:I
+ goto :goto_c
+
+ :cond_1a
+ sget v3, LMain;->live4:I
+
+ sget v4, LMain;->live5:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v0, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field2:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v1, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field3:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v2, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field4:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ iput v3, v6, LMain$Foo;->field0:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v4, v3
+ iput v7, v6, LMain$Foo;->field1:I
+ goto :goto_c
+.end method
+
+## CHECK-START: void Smali.test3Order2(boolean, int, int, int, int, int) register (after)
+## CHECK: name "B0"
+## CHECK-NOT: ParallelMove
+## CHECK: name "B1"
+## CHECK-NOT: end_block
+## CHECK: If
+## CHECK-NOT: ParallelMove
+## CHECK: name "B5"
+## CHECK-NOT: end_block
+## CHECK: InstanceFieldSet
+# We could check here that there is a parallel move, but it's only valid
+# for some architectures (for example x86), as other architectures may
+# not do move at all.
+## CHECK: end_block
+## CHECK-NOT: ParallelMove
+.method public static test3Order2(ZIIIII)V
+ .registers 14
+
+ sget v0, LMain;->live1:I
+ sget v1, LMain;->live2:I
+ sget v2, LMain;->live3:I
+ sget v3, LMain;->live0:I
+ if-eqz p0, :cond_d
+
+ sput v0, LMain;->live1:I
+ goto :goto_37
+
+ :cond_d
+ sget-boolean v4, LMain;->y:Z
+ if-eqz v4, :cond_14
+
+ sput v0, LMain;->live1:I
+ goto :goto_37
+
+ :cond_14
+ sget v4, LMain;->live4:I
+ sget v5, LMain;->live5:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v0, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field2:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v1, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field3:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v2, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field4:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ iput v4, v6, LMain$Foo;->field0:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v5, v4
+ iput v7, v6, LMain$Foo;->field1:I
+ :goto_37
+
+ add-int v4, v0, v1
+ add-int/2addr v4, v2
+ add-int/2addr v4, v3
+ sput v4, LMain;->live1:I
+ return-void
+.end method
diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java
index 6e68f7c91e..7aab6598a1 100644
--- a/test/484-checker-register-hints/src/Main.java
+++ b/test/484-checker-register-hints/src/Main.java
@@ -98,18 +98,6 @@ public class Main {
/// CHECK: name "B0"
/// CHECK-NOT: ParallelMove
/// CHECK: name "B1"
- /// CHECK-NOT: end_block
- /// CHECK: If
- /// CHECK-NOT: ParallelMove
- /// CHECK: name "B6"
- /// CHECK-NOT: end_block
- /// CHECK: InstanceFieldSet
- // We could check here that there is a parallel move, but it's only valid
- // for some architectures (for example x86), as other architectures may
- // not do move at all.
- /// CHECK: end_block
- /// CHECK-NOT: ParallelMove
-
public static void test3(boolean z, int a, int b, int c, int d, int m) {
// Same version as test2, but with branches reversed, to ensure
// whatever linear order is computed, we will get the same results.
diff --git a/test/593-checker-boolean-2-integral-conv/build b/test/593-checker-boolean-2-integral-conv/build
index 3721955670..49292c9ac1 100755
--- a/test/593-checker-boolean-2-integral-conv/build
+++ b/test/593-checker-boolean-2-integral-conv/build
@@ -20,7 +20,4 @@ export USE_JACK=false
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
-# See b/65168732
-export USE_D8=false
-
./default-build "$@"
diff --git a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
index 00ebaaf451..494ab95434 100644
--- a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
+++ b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
@@ -30,6 +30,143 @@
return-void
.end method
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToByte(Z)B
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-byte v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToShort(Z)S
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-short v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToChar(Z)C
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-char v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: Return [<<Sel>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToInt(Z)I
+ .registers 2
+ if-eqz p0, :cond_4
+ const/4 v0, 0x1
+
+ :goto_3
+ return v0
+
+ :cond_4
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
## CHECK-START: long SmaliTests.booleanToLong(boolean) builder (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
diff --git a/test/593-checker-boolean-2-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java
index 3503b2e877..fdc0919f2b 100644
--- a/test/593-checker-boolean-2-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-2-integral-conv/src/Main.java
@@ -32,24 +32,6 @@ public class Main {
System.out.println("passed");
}
- /// CHECK-START: byte Main.booleanToByte(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: byte Main.booleanToByte(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: byte Main.booleanToByte(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -58,24 +40,6 @@ public class Main {
return (byte)(b ? 1 : 0);
}
- /// CHECK-START: short Main.booleanToShort(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: short Main.booleanToShort(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: short Main.booleanToShort(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -84,24 +48,6 @@ public class Main {
return (short)(b ? 1 : 0);
}
- /// CHECK-START: char Main.booleanToChar(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
- /// CHECK-START: char Main.booleanToChar(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
/// CHECK-START: char Main.booleanToChar(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -110,22 +56,6 @@ public class Main {
return (char)(b ? 1 : 0);
}
- /// CHECK-START: int Main.booleanToInt(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Phi>>]
-
- /// CHECK-START: int Main.booleanToInt(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Sel>>]
-
/// CHECK-START: int Main.booleanToInt(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index f6d3bbab28..3ef8fe64bb 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -281,16 +281,14 @@ public class Main {
}
/// CHECK-START: void Main.string2Bytes(char[], java.lang.String) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
/// CHECK-NOT: VecLoad
//
/// CHECK-START-ARM64: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
// NOTE: should correctly deal with compressed and uncompressed cases.
@@ -333,25 +331,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
// Bug b/37764324: integral same-length packed types can be mixed freely.
private static void oneBoth(short[] a, char[] b) {
@@ -382,12 +377,10 @@ public class Main {
/// CHECK-START-ARM: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
@@ -395,12 +388,10 @@ public class Main {
/// CHECK-START-ARM64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
@@ -408,12 +399,10 @@ public class Main {
/// CHECK-START-MIPS64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
diff --git a/test/640-checker-boolean-simd/src/Main.java b/test/640-checker-boolean-simd/src/Main.java
index c337ef4fed..347f916c8d 100644
--- a/test/640-checker-boolean-simd/src/Main.java
+++ b/test/640-checker-boolean-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.and(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void and(boolean x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.or(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void or(boolean x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.xor(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void xor(boolean x) {
@@ -107,25 +95,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
index dc7aaf7f05..5c13fc3926 100644
--- a/test/640-checker-byte-simd/src/Main.java
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,25 +177,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -229,8 +200,7 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
// TODO: would need signess flip.
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
index 0ba596389d..b3dff1411b 100644
--- a/test/640-checker-char-simd/src/Main.java
+++ b/test/640-checker-char-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,8 +177,7 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
// TODO: would need signess flip.
@@ -215,25 +189,21 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
index f7492d5578..5d0899864a 100644
--- a/test/640-checker-double-simd/src/Main.java
+++ b/test/640-checker-double-simd/src/Main.java
@@ -27,19 +27,16 @@ public class Main {
//
/// CHECK-START: void Main.add(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(double x) {
@@ -48,19 +45,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(double x) {
@@ -69,19 +63,16 @@ public class Main {
}
/// CHECK-START: void Main.mul(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(double x) {
@@ -90,19 +81,16 @@ public class Main {
}
/// CHECK-START: void Main.div(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.div(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.div(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(double x) {
@@ -111,19 +99,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -132,19 +117,16 @@ public class Main {
}
/// CHECK-START: void Main.abs() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
@@ -153,8 +135,7 @@ public class Main {
}
/// CHECK-START: void Main.conv(long[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(long[]) loop_optimization (after)
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
index 4fe9675afe..c7883f37a3 100644
--- a/test/640-checker-float-simd/src/Main.java
+++ b/test/640-checker-float-simd/src/Main.java
@@ -27,19 +27,16 @@ public class Main {
//
/// CHECK-START: void Main.add(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(float x) {
@@ -48,19 +45,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(float x) {
@@ -69,19 +63,16 @@ public class Main {
}
/// CHECK-START: void Main.mul(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(float x) {
@@ -90,19 +81,16 @@ public class Main {
}
/// CHECK-START: void Main.div(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.div(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.div(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(float x) {
@@ -111,19 +99,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -132,19 +117,16 @@ public class Main {
}
/// CHECK-START: void Main.abs() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
@@ -153,19 +135,16 @@ public class Main {
}
/// CHECK-START: void Main.conv(int[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.conv(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void conv(int[] b) {
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
index 10dd340129..aa230bfcaf 100644
--- a/test/640-checker-int-simd/src/Main.java
+++ b/test/640-checker-int-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -122,25 +109,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -149,25 +132,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
- //
+ //
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -176,25 +155,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
- //
+ //
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -203,25 +178,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -230,25 +201,21 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
@@ -267,30 +234,25 @@ public class Main {
/// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 32 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
static void shr32() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -299,38 +261,33 @@ public class Main {
/// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 33 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr33() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant33(); // 1, since & 31
@@ -338,38 +295,33 @@ public class Main {
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant -254 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 31
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
index 05dcae6f83..c754f2a309 100644
--- a/test/640-checker-long-simd/src/Main.java
+++ b/test/640-checker-long-simd/src/Main.java
@@ -26,19 +26,16 @@ public class Main {
//
/// CHECK-START: void Main.add(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(long x) {
@@ -47,19 +44,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(long x) {
@@ -68,13 +62,11 @@ public class Main {
}
/// CHECK-START: void Main.mul(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
@@ -87,8 +79,7 @@ public class Main {
}
/// CHECK-START: void Main.div(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(long) loop_optimization (after)
@@ -102,19 +93,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -123,19 +111,16 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -144,19 +129,16 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -165,19 +147,16 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -186,19 +165,16 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
@@ -217,25 +193,21 @@ public class Main {
/// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 64 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr64() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr64() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
static void shr64() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -244,31 +216,27 @@ public class Main {
/// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 65 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr65() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr65() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr65() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant65(); // 1, since & 63
@@ -276,31 +244,27 @@ public class Main {
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant -254 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shrMinus254() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 63
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
index 9dc084d1df..e187397853 100644
--- a/test/640-checker-short-simd/src/Main.java
+++ b/test/640-checker-short-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,25 +177,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -229,8 +200,7 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
// TODO: would need signess flip.
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index fbbd87c035..823908c20e 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -29,36 +29,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -91,36 +85,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -137,16 +125,15 @@ public class Main {
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM64: void Main.doitCastedChar(char[]) loop_optimization (after)
- // CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- // CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
- // CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
- // CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- // CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- // CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.doitCastedChar(char[]) loop_optimization (after)
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
- // CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
private static void doitCastedChar(char[] x) {
for (int i = 0; i < x.length; i++) {
@@ -161,36 +148,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -208,24 +189,20 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitLong(long[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -240,27 +217,23 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitFloat(float[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitFloat(float[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -278,24 +251,20 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitDouble(double[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
diff --git a/test/646-checker-hadd-alt-byte/src/Main.java b/test/646-checker-hadd-alt-byte/src/Main.java
index 69697f7bee..41aa40cd6d 100644
--- a/test/646-checker-hadd-alt-byte/src/Main.java
+++ b/test/646-checker-hadd-alt-byte/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -89,27 +86,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -129,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -180,27 +170,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -219,28 +205,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -269,30 +252,26 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/1929-exception-catch-exception/build b/test/646-checker-hadd-alt-char/build
index 10ffcc537d..10ffcc537d 100644..100755
--- a/test/1929-exception-catch-exception/build
+++ b/test/646-checker-hadd-alt-char/build
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
index 6a7c2a9aa8..8f879c77d0 100644
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ b/test/646-checker-hadd-alt-char/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -91,25 +88,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -132,25 +126,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -184,25 +175,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -224,28 +212,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -274,21 +259,26 @@ public class Main {
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-ARM: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
diff --git a/test/484-checker-register-hints/build b/test/646-checker-hadd-alt-short/build
index 10ffcc537d..10ffcc537d 100644..100755
--- a/test/484-checker-register-hints/build
+++ b/test/646-checker-hadd-alt-short/build
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
index 1378e6c261..b591081fba 100644
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ b/test/646-checker-hadd-alt-short/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -89,27 +86,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -129,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -180,27 +170,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -219,28 +205,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -269,30 +252,26 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/646-checker-hadd-byte/src/Main.java b/test/646-checker-hadd-byte/src/Main.java
index ee5b2a2e36..4d259c437b 100644
--- a/test/646-checker-hadd-byte/src/Main.java
+++ b/test/646-checker-hadd-byte/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -86,27 +83,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -126,25 +119,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -177,27 +167,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -216,28 +202,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -266,30 +249,26 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/611-checker-simplify-if/build b/test/646-checker-hadd-char/build
index 10ffcc537d..10ffcc537d 100644..100755
--- a/test/611-checker-simplify-if/build
+++ b/test/646-checker-hadd-char/build
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
index 7d4ca4e559..6549dab9ff 100644
--- a/test/646-checker-hadd-char/src/Main.java
+++ b/test/646-checker-hadd-char/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -87,25 +84,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -128,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -180,25 +171,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -220,28 +208,23 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -271,28 +254,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
diff --git a/test/646-checker-hadd-short/build b/test/646-checker-hadd-short/build
new file mode 100755
index 0000000000..10ffcc537d
--- /dev/null
+++ b/test/646-checker-hadd-short/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
index f831ec2bb8..55bb958670 100644
--- a/test/646-checker-hadd-short/src/Main.java
+++ b/test/646-checker-hadd-short/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -78,25 +75,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -128,27 +122,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -168,25 +158,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -206,25 +193,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -248,25 +232,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -300,27 +281,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -353,27 +330,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-ARM64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -393,28 +366,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -443,30 +413,26 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
- //
- // CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- // CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index fcf62d3c4c..d365689f5d 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -54,7 +51,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+ /// CHECK-START: void Main.doitMinUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -65,7 +62,7 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -73,27 +70,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -110,25 +103,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -136,7 +126,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -147,7 +137,7 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -155,27 +145,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -192,12 +178,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(byte[] x, byte[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index 93f21f823b..72e8958ad8 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -63,25 +60,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -98,12 +92,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(char[] x, char[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index 23a6d54d9e..6b12e7e63c 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -30,11 +30,10 @@ public class Main {
// TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(double[], double[], double[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(double[] x, double[] y, double[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -53,11 +52,10 @@ public class Main {
// TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(double[] x, double[] y, double[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-float-simd-minmax/src/Main.java b/test/651-checker-float-simd-minmax/src/Main.java
index 3959c821c4..278a9c9367 100644
--- a/test/651-checker-float-simd-minmax/src/Main.java
+++ b/test/651-checker-float-simd-minmax/src/Main.java
@@ -30,11 +30,10 @@ public class Main {
// TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(float[], float[], float[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(float[] x, float[] y, float[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -53,11 +52,10 @@ public class Main {
// TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(float[], float[], float[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(float[] x, float[] y, float[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 11b67b84d3..598106e604 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -27,25 +27,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -61,25 +58,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-long-simd-minmax/src/Main.java b/test/651-checker-long-simd-minmax/src/Main.java
index 6289a1e3bb..458cb8bf1b 100644
--- a/test/651-checker-long-simd-minmax/src/Main.java
+++ b/test/651-checker-long-simd-minmax/src/Main.java
@@ -32,11 +32,10 @@ public class Main {
/// CHECK-NOT: VecMin
//
/// CHECK-START-MIPS64: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(long[] x, long[] y, long[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -58,11 +57,10 @@ public class Main {
/// CHECK-NOT: VecMax
//
/// CHECK-START-MIPS64: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(long[] x, long[] y, long[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-short-simd-minmax/build b/test/651-checker-short-simd-minmax/build
new file mode 100755
index 0000000000..10ffcc537d
--- /dev/null
+++ b/test/651-checker-short-simd-minmax/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index 58f99d09c2..d8c4d1e87e 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -54,7 +51,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) instruction_simplifier (before)
+ /// CHECK-START: void Main.doitMinUnsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -65,7 +62,7 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -73,27 +70,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -110,25 +103,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -136,7 +126,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) instruction_simplifier (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -147,7 +137,7 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -155,27 +145,23 @@ public class Main {
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
- // FIXME: Pattern currently not detected. b/67935418
- // CHECK-START-ARM: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
- // CHECK-START-MIPS64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- // CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- // CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- // CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- // CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-START-MIPS64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -192,12 +178,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(short[] x, short[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/660-checker-simd-sad-int/src/Main.java b/test/660-checker-simd-sad-int/src/Main.java
index 338e841aad..388bfba0d2 100644
--- a/test/660-checker-simd-sad-int/src/Main.java
+++ b/test/660-checker-simd-sad-int/src/Main.java
@@ -32,26 +32,22 @@ public class Main {
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: int Main.sadInt2Int(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: int Main.sadInt2Int(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
private static int sadInt2Int(int[] x, int[] y) {
int min_length = Math.min(x.length, y.length);
int sad = 0;
@@ -105,26 +101,22 @@ public class Main {
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
private static int sadInt2IntAlt2(int[] x, int[] y) {
int min_length = Math.min(x.length, y.length);
int sad = 0;
diff --git a/test/660-checker-simd-sad-short2/build b/test/660-checker-simd-sad-short2/build
new file mode 100755
index 0000000000..10ffcc537d
--- /dev/null
+++ b/test/660-checker-simd-sad-short2/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/661-checker-simd-reduc/src/Main.java b/test/661-checker-simd-reduc/src/Main.java
index 0b425d8bce..1add0f1026 100644
--- a/test/661-checker-simd-reduc/src/Main.java
+++ b/test/661-checker-simd-reduc/src/Main.java
@@ -63,27 +63,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionInt(int[] x) {
int sum = 0;
@@ -111,54 +117,63 @@ public class Main {
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM: int Main.reductionIntChain() loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Cons1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi2>>] loop:none
- /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
- /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Extr1>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi3>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi4>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi3>>,<<Cons2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi4>>] loop:none
- /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
- /// CHECK-DAG: Return [<<Extr2>>] loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: int Main.reductionIntChain() loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Cons1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi2>>] loop:none
- /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
- /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Extr1>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi3>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi4>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi3>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi4>>] loop:none
- /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
- /// CHECK-DAG: Return [<<Extr2>>] loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
- // NOTE: pattern is robust with respect to vector loop unrolling.
+ /// CHECK-START-MIPS64: int Main.reductionIntChain() loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ // NOTE: pattern is robust with respect to vector loop unrolling and peeling.
private static int reductionIntChain() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
int r = 1;
@@ -185,39 +200,34 @@ public class Main {
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM: int Main.reductionIntToLoop(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: int Main.reductionIntToLoop(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
+ /// CHECK-START-MIPS64: int Main.reductionIntToLoop(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionIntToLoop(int[] x) {
int r = 0;
for (int i = 0; i < 4; i++) {
@@ -241,16 +251,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionLong(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Long0:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Long0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionLong(long[] x) {
long sum = 0;
@@ -296,29 +313,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionIntM1(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM1:i\d+>> IntConstant -1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<ConsM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionIntM1(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM1:i\d+>> IntConstant -1 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<ConsM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionIntM1(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionIntM1(int[] x) {
int sum = -1;
@@ -340,16 +361,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionLongM1(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<LongM1:j\d+>> LongConstant -1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<LongM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionLongM1(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionLongM1(long[] x) {
long sum = -1L;
@@ -394,27 +422,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMinusInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMinusInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMinusInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMinusInt(int[] x) {
int sum = 0;
@@ -436,16 +470,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionMinusLong(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Long0:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Long0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionMinusLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionMinusLong(long[] x) {
long sum = 0;
@@ -491,29 +532,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMinInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant 2147483647 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMin [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMinInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant 2147483647 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMin [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMinInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMinInt(int[] x) {
int min = Integer.MAX_VALUE;
@@ -567,29 +612,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMaxInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant -2147483648 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMax [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMaxInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant -2147483648 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMax [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMaxInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMaxInt(int[] x) {
int max = Integer.MIN_VALUE;
diff --git a/test/665-checker-simd-zero/src/Main.java b/test/665-checker-simd-zero/src/Main.java
index 66eea642a4..6cd6d6465a 100644
--- a/test/665-checker-simd-zero/src/Main.java
+++ b/test/665-checker-simd-zero/src/Main.java
@@ -29,6 +29,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroz(boolean[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroz(boolean[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = false;
@@ -45,6 +51,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerob(byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerob(byte[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -61,6 +73,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroc(char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroc(char[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -77,6 +95,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeros(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeros(short[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -93,6 +117,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroi(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroi(int[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -109,6 +139,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerol(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerol(long[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -125,6 +161,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerof(float[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:f\d+>> FloatConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerof(float[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -141,6 +183,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerod(double[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:d\d+>> DoubleConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerod(double[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
diff --git a/test/667-checker-simd-alignment/expected.txt b/test/667-checker-simd-alignment/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/667-checker-simd-alignment/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/667-checker-simd-alignment/info.txt b/test/667-checker-simd-alignment/info.txt
new file mode 100644
index 0000000000..a46bfaa005
--- /dev/null
+++ b/test/667-checker-simd-alignment/info.txt
@@ -0,0 +1 @@
+Test SIMD vectorization alignment optimizations.
diff --git a/test/667-checker-simd-alignment/src/Main.java b/test/667-checker-simd-alignment/src/Main.java
new file mode 100644
index 0000000000..a6235b8be8
--- /dev/null
+++ b/test/667-checker-simd-alignment/src/Main.java
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SIMD vectorization alignment optimizations.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.staticallyAligned(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyAligned(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+ static void staticallyAligned(int[] a) {
+ // Starts at offset 12 (hidden) + 1 * 4 relative to base alignment.
+ // So no peeling, aligned vector, no cleanup.
+ for (int i = 1; i < 9; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyAlignedN(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyAlignedN(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyAlignedN(int[] a) {
+ // Starts at offset 12 (hidden) + 1 * 4 relative to base alignment.
+ // So no peeling, aligned vector, cleanup.
+ for (int i = 1; i < a.length; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyMisaligned(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyMisaligned(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<PhiP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Phi>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Phi>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+ static void staticallyMisaligned(int[] a) {
+ // Starts at offset 12 (hidden) + 0 * 4 relative to base alignment.
+ // Yes, Art runtime misaligns the most common access pattern :-(
+ // Static peeling to the rescue, aligned vector, no cleanup.
+ for (int i = 0; i < 9; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyMisalignedN(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Phi>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyMisalignedN(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Par:l\d+>> NullCheck loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<GetP:i\d+>> ArrayGet [<<Par>>,<<PhiP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<GetP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Phi>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Phi>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<PhiC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<PhiC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyMisalignedN(int[] a) {
+ // Starts at offset 12 (hidden) + 0 * 4 relative to base alignment.
+ // Yes, Art runtime misaligns the most common access pattern :-(
+ // Static peeling to the rescue, aligned vector, cleanup.
+ for (int i = 0; i < a.length; i++) {
+ a[i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyUnknownAligned(int[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Off>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Nrm>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Nrm>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyUnknownAligned(int[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmP:i\d+>> Add [<<PhiP>>,<<Off>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<Off>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<Off>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyUnknownAligned(int[] a, int off) {
+ // Starts at an unknown offset due to parameter off.
+ // Dynamic peeling to the rescue, aligned vector, cleanup.
+ for (int i = 0; i < 9; i++) {
+ a[off + i] += 1;
+ }
+ }
+
+ /// CHECK-START: void Main.staticallyUnknownAlignedN(int[], int, int) loop_optimization (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Off>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<Nrm>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<Nrm>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM: void Main.staticallyUnknownAlignedN(int[], int, int) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Off:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Vl:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<PhiP:i\d+>> Phi [<<Zero>>,<<AddIP:i\d+>>] loop:<<Peel:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmP:i\d+>> Add [<<PhiP>>,<<Off>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Par>>,<<NrmP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Get>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmP>>,<<AddP>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<AddIP>> Add [<<PhiP>>,<<One>>] loop:<<Peel>> outer_loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<PhiP>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Nrm:i\d+>> Add [<<Phi>>,<<Off>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [<<Par>>,<<Nrm>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Nrm>>,<<Add>>] alignment:ALIGN(8,0) loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<Vl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<PhiC:i\d+>> Phi [<<Phi>>,<<AddIC:i\d+>>] loop:<<Clean:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<NrmC:i\d+>> Add [<<PhiC>>,<<Off>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<GetC:i\d+>> ArrayGet [<<Par>>,<<NrmC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<GetC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Par>>,<<NrmC>>,<<AddC>>] loop:<<Clean>> outer_loop:none
+ /// CHECK-DAG: <<AddIC>> Add [<<PhiC>>,<<One>>] loop:<<Clean>> outer_loop:none
+ static void staticallyUnknownAlignedN(int[] a, int off, int n) {
+ // Starts at an unknown offset due to parameter off.
+ // Dynamic peeling to the rescue, aligned vector, cleanup.
+ for (int i = 0; i < n; i++) {
+ a[off + i] += 1;
+ }
+ }
+
+ //
+ // Test drivers.
+ //
+
+ private static void test1() {
+ int[] a = new int[9];
+ staticallyAligned(a);
+ for (int i = 0; i < a.length; i++) {
+ int e = i > 0 ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+
+ private static void test2() {
+ for (int n = 0; n <= 71; n++) {
+ int[] a = new int[n];
+ staticallyAlignedN(a);
+ for (int i = 0; i < a.length; i++) {
+ int e = i > 0 ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+
+ private static void test3() {
+ int[] a = new int[9];
+ staticallyMisaligned(a);
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(1, a[i]);
+ }
+ }
+
+ private static void test4() {
+ for (int n = 0; n <= 71; n++) {
+ int[] a = new int[n];
+ staticallyMisalignedN(a);
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(1, a[i]);
+ }
+ }
+ }
+
+ private static void test5() {
+ for (int off = 0; off <= 8; off++) {
+ int[] a = new int[17];
+ staticallyUnknownAligned(a, off);
+ for (int i = 0; i < a.length; i++) {
+ int e = (off <= i && i < off + 9) ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+
+ private static void test6() {
+ for (int off = 0; off <= 8; off++) {
+ for (int n = 0; n <= 9; n++) {
+ int[] a = new int[17];
+ staticallyUnknownAlignedN(a, off, n);
+ for (int i = 0; i < a.length; i++) {
+ int e = (off <= i && i < off + n) ? 1 : 0;
+ expectEquals(e, a[i]);
+ }
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+ test6();
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/668-aiobe/expected.txt b/test/668-aiobe/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/668-aiobe/expected.txt
diff --git a/test/668-aiobe/info.txt b/test/668-aiobe/info.txt
new file mode 100644
index 0000000000..e42260150e
--- /dev/null
+++ b/test/668-aiobe/info.txt
@@ -0,0 +1,2 @@
+Regression test for the mterp arm interpreter which used to throw
+the wrong exception when accessing out of bounds a long/double array.
diff --git a/test/668-aiobe/smali/TestCase.smali b/test/668-aiobe/smali/TestCase.smali
new file mode 100644
index 0000000000..5fa62e96e9
--- /dev/null
+++ b/test/668-aiobe/smali/TestCase.smali
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static run([DI)D
+.registers 2
+ aget-wide p0, p0, p1
+ return-wide p0
+.end method
diff --git a/test/668-aiobe/src/Main.java b/test/668-aiobe/src/Main.java
new file mode 100644
index 0000000000..2bd30c4184
--- /dev/null
+++ b/test/668-aiobe/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ double[] array = new double[5];
+ try {
+ Class<?> c = Class.forName("TestCase");
+ Method m = c.getMethod("run", double[].class, int.class);
+ m.invoke(null, array, 42);
+ } catch (InvocationTargetException e) {
+ // expected
+ if (!(e.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw new Error("Expected ArrayIndexOutOfBoundsException, got " + e.getCause());
+ }
+ return;
+ }
+ throw new Error("Expected InvocationTargetException");
+ }
+}
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
index 08a23a7fbc..d21596d4bc 100644
--- a/test/706-checker-scheduler/src/Main.java
+++ b/test/706-checker-scheduler/src/Main.java
@@ -276,6 +276,83 @@ public class Main {
}
}
+ // This case tests a bug found in LSA where LSA doesn't understand IntermediateAddress,
+ // and incorrectly reported no alias between ArraySet1 and ArrayGet2,
+ // thus ArrayGet2 is scheduled above ArraySet1 incorrectly.
+
+ /// CHECK-START-ARM64: void Main.CrossOverLoop(int[], int[]) scheduler (before)
+ /// CHECK: <<ParamA:l\d+>> ParameterValue loop:none
+ /// CHECK: <<ParamB:l\d+>> ParameterValue loop:none
+ /// CHECK: <<NullB:l\d+>> NullCheck [<<ParamB>>] loop:none
+ /// CHECK: <<NullA:l\d+>> NullCheck [<<ParamA>>] loop:none
+ /// CHECK: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: <<Addr1:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet1:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: <<Addr2:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr2>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-ARM64: void Main.CrossOverLoop(int[], int[]) scheduler (after)
+ /// CHECK: <<ParamA:l\d+>> ParameterValue loop:none
+ /// CHECK: <<ParamB:l\d+>> ParameterValue loop:none
+ /// CHECK: <<NullB:l\d+>> NullCheck [<<ParamB>>] loop:none
+ /// CHECK: <<NullA:l\d+>> NullCheck [<<ParamA>>] loop:none
+ /// CHECK: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: <<Addr1:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet1:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: <<Addr2:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr2>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ private static void CrossOverLoop(int a[], int b[]) {
+ b[20] = 99;
+ for (int i = 0; i < a.length; i++) {
+ a[i] = b[20] - 7;
+ i++;
+ a[i] = b[20] - 7;
+ }
+ }
+
+ // This test case is similar to above cross over loop,
+ // but has more complex chains of transforming the original references:
+ // ParameterValue --> BoundType --> NullCheck --> ArrayGet.
+ // ParameterValue --> BoundType --> NullCheck --> IntermediateAddress --> ArraySet.
+ // After using LSA to analyze the original references, the scheduler should be able
+ // to find out that 'a' and 'b' may alias, and hence is unable to reorder these ArrayGet/ArraySet.
+
+ /// CHECK-START-ARM64: void Main.CrossOverLoop2(java.lang.Object, java.lang.Object) scheduler (before)
+ /// CHECK: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: ArraySet loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-ARM64: void Main.CrossOverLoop2(java.lang.Object, java.lang.Object) scheduler (after)
+ /// CHECK: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK: Add loop:<<Loop>> outer_loop:none
+ /// CHECK: ArraySet loop:<<Loop>> outer_loop:none
+ private static void CrossOverLoop2(Object a, Object b) {
+ ((int[])b)[20] = 99;
+ for (int i = 0; i < ((int[])a).length; i++) {
+ ((int[])a)[i] = ((int[])b)[20] - 7;
+ i++;
+ ((int[])a)[i] = ((int[])b)[20] - 7;
+ }
+ }
+
/// CHECK-START-ARM: void Main.accessFields() scheduler (before)
/// CHECK: InstanceFieldGet
/// CHECK: Add
diff --git a/test/910-methods/check b/test/910-methods/check
index e6f7d7773f..76b23cb906 100644
--- a/test/910-methods/check
+++ b/test/910-methods/check
@@ -19,8 +19,14 @@ if [[ "$USE_JACK" == true ]]; then
patch -p0 expected.txt < expected_jack.diff
fi
-if [[ "$USE_D8" == true ]]; then
- patch -p0 expected.txt < expected_d8.diff
+./default-check "$@"
+if [[ "$?" == "0" ]]; then
+ exit 0;
fi
+# We cannot always correctly determine if D8 was used because of (b/68406220).
+# So we are just going to try to see if it matches the expected output of D8 no
+# matter what.
+patch -p0 expected.txt < expected_d8.diff
+
./default-check "$@"
diff --git a/test/911-get-stack-trace/check b/test/911-get-stack-trace/check
index 835850004a..a46ea9e54a 100644
--- a/test/911-get-stack-trace/check
+++ b/test/911-get-stack-trace/check
@@ -19,4 +19,18 @@ if [[ "$USE_JACK" == true ]]; then
patch -p0 expected.txt < expected_jack.diff
fi
+if [[ "$DX" == 'd8' ]]; then
+ patch -p0 expected.txt < expected_d8.diff
+fi
+
+./default-check "$@"
+if [[ "$?" == "0" ]]; then
+ exit 0;
+fi
+
+# We cannot always correctly determine if D8 was used because of (b/68406220).
+# So we are just going to try to see if it matches the expected output of D8 no
+# matter what.
+patch -p0 expected.txt < expected_d8.diff
+
./default-check "$@"
diff --git a/test/911-get-stack-trace/expected_d8.diff b/test/911-get-stack-trace/expected_d8.diff
new file mode 100644
index 0000000000..3ce9bedf95
--- /dev/null
+++ b/test/911-get-stack-trace/expected_d8.diff
@@ -0,0 +1,456 @@
+12c12
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+15c15
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+18c18
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+21c21
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+24c24
+< doTest ()V 34 25
+---
+> doTest ()V 33 25
+32c32
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+35c35
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+38c38
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+41c41
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+44c44
+< doTest ()V 38 26
+---
+> doTest ()V 37 26
+57c57
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+62c62
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+70c70
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+84c84
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+87c87
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+90c90
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+93c93
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+102c102
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+105c105
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+108c108
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+111c111
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+125c125
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+132c132
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+137c137
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+140c140
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+148c148
+< printOrWait (IILart/ControlData;)V 44 54
+---
+> printOrWait (IILart/ControlData;)V 45 54
+152c152
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+155c155
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+158c158
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+161c161
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+169c169
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+172c172
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+175c175
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+178c178
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+183c183
+< printOrWait (IILart/ControlData;)V 44 54
+---
+> printOrWait (IILart/ControlData;)V 45 54
+187c187
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+191c191
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+199c199
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+204c204
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+207c207
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+363c363
+< doTest ()V 122 59
+---
+> doTest ()V 119 59
+376c376
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+379c379
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+382c382
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+385c385
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+397c397
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+400c400
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+403c403
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+406c406
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+418c418
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+421c421
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+424c424
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+427c427
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+439c439
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+442c442
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+445c445
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+448c448
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+460c460
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+463c463
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+466c466
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+469c469
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+481c481
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+484c484
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+487c487
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+490c490
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+502c502
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+505c505
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+508c508
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+511c511
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+523c523
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+526c526
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+529c529
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+532c532
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+544c544
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+547c547
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+550c550
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+553c553
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+565c565
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+568c568
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+571c571
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+574c574
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+598c598
+< doTest ()V 127 61
+---
+> doTest ()V 124 61
+630c630
+< doTest ()V 112 54
+---
+> doTest ()V 109 54
+677c677
+< doTest ()V 117 56
+---
+> doTest ()V 114 56
+687c687
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+690c690
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+693c693
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+696c696
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+708c708
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+711c711
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+714c714
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+717c717
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+729c729
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+732c732
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+735c735
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+738c738
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+750c750
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+753c753
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+756c756
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+759c759
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+771c771
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+774c774
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+777c777
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+780c780
+< baz (IIILart/ControlData;)Ljava/lang/Object; 9 34
+---
+> baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+792c792
+< [public static void art.Frames.doTestSameThread(), 35]
+---
+> [public static void art.Frames.doTestSameThread(), 40]
+807c807
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+810c810
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+813c813
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+816c816
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+827c827
+< [private static void art.Recurse.printOrWait(int,int,art.ControlData), 2c]
+---
+> [private static void art.Recurse.printOrWait(int,int,art.ControlData), 2d]
+831c831
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+834c834
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+837c837
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+840c840
+< [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 9]
+---
+> [private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
diff --git a/test/988-method-trace/gen_srcs.py b/test/988-method-trace/gen_srcs.py
index 8f1082ffbe..225f41b5b6 100755
--- a/test/988-method-trace/gen_srcs.py
+++ b/test/988-method-trace/gen_srcs.py
@@ -28,8 +28,8 @@ import sys
from string import Template
-# Relative path to art/compiler/intrinsics_list.h
-INTRINSICS_LIST_H = os.path.dirname(os.path.realpath(__file__)) + "/../../compiler/intrinsics_list.h"
+# Relative path to art/runtime/intrinsics_list.h
+INTRINSICS_LIST_H = os.path.dirname(os.path.realpath(__file__)) + "/../../runtime/intrinsics_list.h"
# Macro parameter index to V(). Negative means from the end.
IDX_STATIC_OR_VIRTUAL = 1
@@ -39,7 +39,8 @@ IDX_CLASS_NAME = -3
# Exclude all hidden API.
KLASS_BLACK_LIST = ['sun.misc.Unsafe', 'libcore.io.Memory', 'java.lang.StringFactory',
- 'java.lang.invoke.VarHandle' ] # TODO(b/65872996): Enable when VarHandle is visible.
+ 'java.lang.invoke.MethodHandle', # invokes are tested by 956-method-handles
+ 'java.lang.invoke.VarHandle' ] # TODO(b/65872996): will be tested separately
METHOD_BLACK_LIST = [('java.lang.ref.Reference', 'getReferent'),
('java.lang.String', 'getCharsNoCheck'),
('java.lang.System', 'arraycopy')] # arraycopy has a manual test.
@@ -90,7 +91,7 @@ $initialize_classes
}
static void test() {
- // Call each intrinsic from art/compiler/intrinsics_list.h to make sure they are traced.
+ // Call each intrinsic from art/runtime/intrinsics_list.h to make sure they are traced.
$test_body
}
}
diff --git a/test/988-method-trace/src/art/Test988Intrinsics.java b/test/988-method-trace/src/art/Test988Intrinsics.java
index 099fbf2ce8..3069f1a2c3 100644
--- a/test/988-method-trace/src/art/Test988Intrinsics.java
+++ b/test/988-method-trace/src/art/Test988Intrinsics.java
@@ -44,7 +44,7 @@ class Test988Intrinsics {
}
static void test() {
- // Call each intrinsic from art/compiler/intrinsics_list.h to make sure they are traced.
+ // Call each intrinsic from art/runtime/intrinsics_list.h to make sure they are traced.
java.lang.Double.doubleToRawLongBits(0.0);
java.lang.Double.doubleToLongBits(0.0);
java.lang.Double.isInfinite(0.0);
diff --git a/test/992-source-data/expected.txt b/test/992-source-data/expected.txt
index 4db8df0ada..7f59682b1d 100644
--- a/test/992-source-data/expected.txt
+++ b/test/992-source-data/expected.txt
@@ -1,10 +1,22 @@
class art.Test992 is defined in file "Test992.java"
+class art.Test992 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class art.Test992$Target1 is defined in file "Test992.java"
+class art.Test992$Target1 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class art.Target2 is defined in file "Target2.java"
+class art.Target2 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
int does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+int does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class java.lang.Integer is defined in file "Integer.java"
+class java.lang.Integer does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class java.lang.Object is defined in file "Object.java"
+class java.lang.Object does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
interface java.lang.Runnable is defined in file "Runnable.java"
+interface java.lang.Runnable does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class [Ljava.lang.Object; does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class [Ljava.lang.Object; does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class [I does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class [I does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
null does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_INVALID_CLASS
+null does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_INVALID_CLASS
+Proxy of [interface java.lang.Runnable] does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+Proxy of [interface java.lang.Runnable] does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
diff --git a/test/992-source-data/source_file.cc b/test/992-source-data/source_file.cc
index 46d197d048..78687ff005 100644
--- a/test/992-source-data/source_file.cc
+++ b/test/992-source-data/source_file.cc
@@ -49,6 +49,19 @@ jstring JNICALL Java_art_Test992_getSourceFileName(JNIEnv* env,
return ret;
}
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test992_getSourceDebugExtension(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jclass target) {
+ char* ext = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetSourceDebugExtension(target, &ext))) {
+ return nullptr;
+ }
+ jstring ret = env->NewStringUTF(ext);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(ext));
+ return ret;
+}
+
} // namespace Test992SourceFile
} // namespace art
diff --git a/test/992-source-data/src/art/Test992.java b/test/992-source-data/src/art/Test992.java
index d9ab112726..cc4f0c724c 100644
--- a/test/992-source-data/src/art/Test992.java
+++ b/test/992-source-data/src/art/Test992.java
@@ -16,6 +16,8 @@
package art;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
import java.util.Base64;
public class Test992 {
@@ -33,15 +35,30 @@ public class Test992 {
doTest(new Object[0].getClass());
doTest(new int[0].getClass());
doTest(null);
+ doTest(Proxy.getProxyClass(Test992.class.getClassLoader(), Runnable.class));
}
+ public static String printClass(Class<?> k) {
+ if (k != null && Proxy.class.isAssignableFrom(k)) {
+ return String.format("Proxy of %s", Arrays.toString(k.getInterfaces()));
+ } else {
+ return String.format("%s", k);
+ }
+ }
public static void doTest(Class<?> k) {
+ String pk = printClass(k);
+ try {
+ System.out.println(pk + " is defined in file \"" + getSourceFileName(k) + "\"");
+ } catch (Exception e) {
+ System.out.println(pk + " does not have a known source file because " + e);
+ }
try {
- System.out.println(k + " is defined in file \"" + getSourceFileName(k) + "\"");
+ System.out.println(pk + " has extension \"" + getSourceDebugExtension(k) + "\"");
} catch (Exception e) {
- System.out.println(k + " does not have a known source file because " + e);
+ System.out.println(pk + " does not have a known source file extension because " + e);
}
}
public static native String getSourceFileName(Class<?> k) throws Exception;
+ public static native String getSourceDebugExtension(Class<?> k) throws Exception;
}
diff --git a/test/993-breakpoints/breakpoints.cc b/test/993-breakpoints/breakpoints.cc
index 3734ce8634..e9cf3b32c6 100644
--- a/test/993-breakpoints/breakpoints.cc
+++ b/test/993-breakpoints/breakpoints.cc
@@ -49,6 +49,57 @@ jobject JNICALL Java_art_Test993_constructNative(JNIEnv* env,
}
extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeObject(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticObjectMethod(clazz, method);
+ } else {
+ env->CallObjectMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeBool(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticBooleanMethod(clazz, method);
+ } else {
+ env->CallBooleanMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeLong(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticLongMethod(clazz, method);
+ } else {
+ env->CallLongMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
void JNICALL Java_art_Test993_invokeNative(JNIEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jobject target,
diff --git a/test/993-breakpoints/expected.txt b/test/993-breakpoints/expected.txt
index 962154734b..1749a77e9d 100644
--- a/test/993-breakpoints/expected.txt
+++ b/test/993-breakpoints/expected.txt
@@ -552,6 +552,107 @@ Running private instance invoke
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
Invoking "new TestClass4().callPrivateMethod()"
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
+Running Vector constructor
+ Breaking on []
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Created: []
+ Constructing: new Vector()
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running Stack constructor
+ Breaking on []
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Created: []
+ Constructing: new Stack()
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>, public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running bcp static invoke
+ Breaking on []
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Invoking "Optional::empty"
+ Breaking on [public static java.util.Optional java.util.Optional.empty() @ <NON-DETERMINISTIC>]
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::empty"
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+Running bcp private static invoke
+ Breaking on []
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Invoking "Random::seedUniquifier"
+ Breaking on [private static long java.util.Random.seedUniquifier() @ <NON-DETERMINISTIC>]
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+ Invoking "Random::seedUniquifier"
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+Running bcp private invoke
+ Breaking on []
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Invoking "Duration::toSeconds"
+ Breaking on [private java.math.BigDecimal java.time.Duration.toSeconds() @ <NON-DETERMINISTIC>]
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+ Invoking "Duration::toSeconds"
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+Running bcp invoke
+ Breaking on []
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Invoking "Optional::isPresent"
+ Breaking on [public boolean java.util.Optional.isPresent() @ <NON-DETERMINISTIC>]
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::isPresent"
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
Running TestClass1 constructor
Breaking on []
Native constructor: public art.Test993$TestClass1(), type: class art.Test993$TestClass1
diff --git a/test/993-breakpoints/src/art/Test993.java b/test/993-breakpoints/src/art/Test993.java
index 781ebffc0f..d6a6a676cd 100644
--- a/test/993-breakpoints/src/art/Test993.java
+++ b/test/993-breakpoints/src/art/Test993.java
@@ -16,20 +16,20 @@
package art;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Arrays;
import java.lang.reflect.Executable;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
-import java.util.List;
-import java.util.Set;
-import java.util.Spliterator;
-import java.util.Spliterators;
-import java.util.Collection;
+
+import java.time.Duration;
+
import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.function.IntUnaryOperator;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
import java.util.function.Supplier;
public class Test993 {
@@ -120,7 +120,13 @@ public class Test993 {
}
public static void notifyBreakpointReached(Thread thr, Executable e, long loc) {
- System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + Breakpoint.locationToLine(e, loc));
+ String line;
+ if (e.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ line = Integer.valueOf(Breakpoint.locationToLine(e, loc)).toString();
+ } else {
+ line = "<NON-DETERMINISTIC>";
+ }
+ System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + line);
}
public static interface ThrowRunnable extends Runnable {
@@ -180,6 +186,57 @@ public class Test993 {
public static native void invokeNative(Method m, Class<?> clazz, Object thizz);
+ public static class InvokeNativeBool implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeBool(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeBool(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeBool(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeObject implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeObject(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeObject(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeObject(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeLong implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeLong(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeLong(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeLong(Method m, Class<?> clazz, Object thizz);
+
public static class ConstructDirect implements Runnable {
String msg;
Supplier<Object> s;
@@ -258,7 +315,15 @@ public class Test993 {
}
private static Breakpoint.Manager.BP BP(Executable m) {
- return new Breakpoint.Manager.BP(m);
+ return new Breakpoint.Manager.BP(m) {
+ public String toString() {
+ if (method.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ return super.toString();
+ } else {
+ return method.toString() + " @ <NON-DETERMINISTIC>";
+ }
+ }
+ };
}
public static void run() throws Exception {
@@ -271,6 +336,7 @@ public class Test993 {
Thread.currentThread());
runMethodTests();
+ runBCPMethodTests();
runConstructorTests();
Breakpoint.stopBreakpointWatch(Thread.currentThread());
@@ -302,6 +368,94 @@ public class Test993 {
runTestGroups("TestClass1ext constructor", tc1ext_constructors, tc1ext_bps);
}
+ // These test to make sure we are able to break on functions that might have been quickened or
+ // inlined from the boot-image. These were all chosen for being in the bootclasspath, not being
+ // long enough to prevent inlining, and not being used for the testing framework.
+ public static void runBCPMethodTests() throws Exception {
+ // The methods we will be breaking on.
+ Method bcp_private_method = Duration.class.getDeclaredMethod("toSeconds");
+ Method bcp_virtual_method = Optional.class.getDeclaredMethod("isPresent");
+ Method bcp_static_method = Optional.class.getDeclaredMethod("empty");
+ Method bcp_private_static_method = Random.class.getDeclaredMethod("seedUniquifier");
+
+ // Some constructors we will break on.
+ Constructor<?> bcp_stack_constructor = Stack.class.getConstructor();
+ Constructor<?> bcp_vector_constructor = Vector.class.getConstructor();
+ if (!(Vector.class.isAssignableFrom(Stack.class))) {
+ throw new Error("Expected Stack to extend Vector!");
+ }
+
+ // BCP constructors.
+ Runnable[] vector_constructors = new Runnable[] {
+ new ConstructNative(bcp_vector_constructor),
+ new ConstructReflect(bcp_vector_constructor),
+ new ConstructDirect("new Vector()", Vector::new),
+ };
+ Breakpoint.Manager.BP[] vector_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_vector_constructor),
+ };
+ runTestGroups("Vector constructor", vector_constructors, vector_breakpoints);
+
+ Runnable[] stack_constructors = new Runnable[] {
+ new ConstructNative(bcp_stack_constructor),
+ new ConstructReflect(bcp_stack_constructor),
+ new ConstructDirect("new Stack()", Stack::new),
+ };
+ Breakpoint.Manager.BP[] stack_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_stack_constructor), BP(bcp_vector_constructor),
+ };
+ runTestGroups("Stack constructor", stack_constructors, stack_breakpoints);
+
+ // Static function
+ Runnable[] static_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_static_method, null),
+
+ new InvokeReflect(bcp_static_method, null),
+
+ new InvokeDirect("Optional::empty", () -> { Optional.empty(); }),
+ };
+ Breakpoint.Manager.BP[] static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_static_method)
+ };
+ runTestGroups("bcp static invoke", static_invokes, static_breakpoints);
+
+ // Static private class function
+ Runnable[] private_static_invokes = new Runnable[] {
+ new InvokeNativeLong(bcp_private_static_method, null),
+
+ new InvokeDirect("Random::seedUniquifier", () -> { new Random(); }),
+ };
+ Breakpoint.Manager.BP[] private_static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_static_method)
+ };
+ runTestGroups("bcp private static invoke", private_static_invokes, private_static_breakpoints);
+
+ // private class method
+ Duration test_duration = Duration.ofDays(14);
+ Runnable[] private_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_private_method, test_duration),
+
+ new InvokeDirect("Duration::toSeconds", () -> { test_duration.multipliedBy(2); }),
+ };
+ Breakpoint.Manager.BP[] private_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_method)
+ };
+ runTestGroups("bcp private invoke", private_invokes, private_breakpoints);
+
+ // class method
+ Runnable[] public_invokes = new Runnable[] {
+ new InvokeNativeBool(bcp_virtual_method, Optional.of("test")),
+
+ new InvokeReflect(bcp_virtual_method, Optional.of("test2")),
+
+ new InvokeDirect("Optional::isPresent", () -> { Optional.of("test3").isPresent(); }),
+ };
+ Breakpoint.Manager.BP[] public_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_virtual_method)
+ };
+ runTestGroups("bcp invoke", public_invokes, public_breakpoints);
+ }
+
public static void runMethodTests() throws Exception {
// The methods we will be breaking on.
Method breakpoint_method = Test993.class.getDeclaredMethod("breakpoint");
diff --git a/test/Android.bp b/test/Android.bp
index 16b30f988f..17ef1141df 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -359,6 +359,7 @@ cc_defaults {
"141-class-unload/jni_unload.cc",
"148-multithread-gc-annotations/gc_coverage.cc",
"149-suspend-all-stress/suspend_all.cc",
+ "203-multi-checkpoint/multi_checkpoint.cc",
"154-gc-loop/heap_interface.cc",
"454-get-vreg/get_vreg_jni.cc",
"457-regs/regs_jni.cc",
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 2fda494086..bf964a6895 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -16,6 +16,7 @@ BOOT_IMAGE=""
COMPILE_FLAGS=""
DALVIKVM="dalvikvm32"
DEBUGGER="n"
+WITH_AGENT=""
DEBUGGER_AGENT=""
WRAP_DEBUGGER_AGENT="n"
DEV_MODE="n"
@@ -228,6 +229,11 @@ while true; do
FLAGS="${FLAGS} -Xcompiler-option --dump-cfg-append"
COMPILE_FLAGS="${COMPILE_FLAGS} --dump-cfg-append"
shift
+ elif [ "x$1" = "x--with-agent" ]; then
+ shift
+ USE_JVMTI="y"
+ WITH_AGENT="$1"
+ shift
elif [ "x$1" = "x--debug-wrap-agent" ]; then
WRAP_DEBUGGER_AGENT="y"
shift
@@ -442,6 +448,10 @@ elif [ "$DEBUGGER" = "agent" ]; then
DEBUGGER_OPTS="-agentpath:${AGENTPATH}=transport=dt_socket,address=$PORT,server=y,suspend=y"
fi
+if [ "x$WITH_AGENT" != "x" ]; then
+ FLAGS="${FLAGS} -agentpath:${WITH_AGENT}"
+fi
+
if [ "$USE_JVMTI" = "y" ]; then
if [ "$USE_JVM" = "n" ]; then
plugin=libopenjdkjvmtid.so
diff --git a/test/run-test b/test/run-test
index 09a70e50a9..fdb2ee47a7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -291,6 +291,11 @@ while true; do
elif [ "x$1" = "x--debug-wrap-agent" ]; then
run_args="${run_args} --debug-wrap-agent"
shift
+ elif [ "x$1" = "x--with-agent" ]; then
+ shift
+ option="$1"
+ run_args="${run_args} --with-agent $1"
+ shift
elif [ "x$1" = "x--debug-agent" ]; then
shift
option="$1"
@@ -661,6 +666,7 @@ if [ "$usage" = "yes" ]; then
echo " only supported on host."
echo " --debug-wrap-agent use libwrapagentproperties and tools/libjdwp-compat.props"
echo " to load the debugger agent specified by --debug-agent."
+ echo " --with-agent <agent> Run the test with the given agent loaded with -agentpath:"
echo " --debuggable Whether to compile Java code for a debugger."
echo " --gdb Run under gdb; incompatible with some tests."
echo " --gdb-arg Pass an option to gdb."
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index b9123deee2..92cc977f01 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -35,7 +35,7 @@ _DUMP_MANY_VARS_LIST = ['HOST_2ND_ARCH_PREFIX',
'HOST_OUT_EXECUTABLES',
'ANDROID_JAVA_TOOLCHAIN',
'ANDROID_COMPILE_WITH_JACK',
- 'USE_D8_BY_DEFAULT']
+ 'USE_D8']
_DUMP_MANY_VARS = None # To be set to a dictionary with above list being the keys,
# and the build variable being the value.
def _dump_many_vars(var_name):
@@ -53,15 +53,13 @@ def _dump_many_vars(var_name):
all_vars=" ".join(_DUMP_MANY_VARS_LIST)
# The command is taken from build/envsetup.sh to fetch build variables.
- command = ("CALLED_FROM_SETUP=true " # Enable the 'dump-many-vars' make target.
- "BUILD_SYSTEM=build/core " # Set up lookup path for make includes.
- "make --no-print-directory -C \"%s\" -f build/core/config.mk "
- "dump-many-vars DUMP_MANY_VARS=\"%s\"") % (ANDROID_BUILD_TOP, all_vars)
+ command = ("build/soong/soong_ui.bash --dumpvars-mode --vars=\"%s\"") % (all_vars)
config = subprocess.Popen(command,
stdout=subprocess.PIPE,
universal_newlines=True,
- shell=True).communicate()[0] # read until EOF, select stdin
+ shell=True,
+ cwd=ANDROID_BUILD_TOP).communicate()[0] # read until EOF, select stdin
# Prints out something like:
# TARGET_ARCH='arm64'
# HOST_ARCH='x86_64'
@@ -111,7 +109,7 @@ ANDROID_BUILD_TOP = _get_android_build_top()
ANDROID_COMPILE_WITH_JACK = _get_build_var_boolean('ANDROID_COMPILE_WITH_JACK', 'default')
# Follow the build system's D8 usage.
-USE_D8_BY_DEFAULT = _get_build_var_boolean('USE_D8_BY_DEFAULT', False)
+USE_D8 = _get_build_var_boolean('USE_D8', False)
# Directory used for temporary test files on the host.
ART_HOST_TEST_DIR = tempfile.mkdtemp(prefix = 'test-art-')
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 492b792239..531508e6b3 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -64,6 +64,7 @@ os.environ.update(custom_env)
if target.has_key('make'):
build_command = 'make'
+ build_command += ' DX='
build_command += ' -j' + str(n_threads)
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + target.get('make')
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 0226cd405a..20a0cd93e5 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -265,10 +265,10 @@ def find_extra_device_arguments(target):
Gets any extra arguments from the device_config.
"""
if target == 'host':
- return device_config.get(target, [])
+ return device_config.get(target, { 'run-test-args' : [] })['run-test-args']
else:
device = get_device_name()
- return device_config.get(device, [])
+ return device_config.get(device, { 'run-test-args' : [] })['run-test-args']
def get_device_name():
"""
@@ -460,7 +460,7 @@ def run_tests(tests):
elif env.ANDROID_COMPILE_WITH_JACK == False:
options_test += ' --build-with-javac-dx'
- if env.USE_D8_BY_DEFAULT == True:
+ if env.USE_D8 == True:
options_test += ' --build-with-d8'
# TODO(http://36039166): This is a temporary solution to
@@ -948,6 +948,7 @@ def main():
if 'target' in _user_input_variants['target']:
build_targets += 'test-art-target-run-test-dependencies'
build_command = 'make'
+ build_command += ' DX='
build_command += ' -j'
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + build_targets
@@ -962,7 +963,12 @@ def main():
test_runner_thread.daemon = True
try:
test_runner_thread.start()
- while threading.active_count() > 1:
+ # This loops waits for all the threads to finish, unless
+ # stop_testrunner is set to True. When ART_TEST_KEEP_GOING
+ # is set to false, stop_testrunner is set to True as soon as
+ # a test fails to signal the parent thread to stop
+ # the execution of the testrunner.
+ while threading.active_count() > 1 and not stop_testrunner:
time.sleep(0.1)
print_analysis()
except Exception as e:
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 5eccba1327..a9a0492fe9 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -30,7 +30,52 @@ LOCAL_MODULE := ahat
# Let users with Java 7 run ahat (b/28303627)
LOCAL_JAVA_LANGUAGE_VERSION := 1.7
+# Make this available on the classpath of the general-tests tradefed suite.
+# It is used by libcore tests that run there.
+LOCAL_COMPATIBILITY_SUITE := general-tests
+
include $(BUILD_HOST_JAVA_LIBRARY)
+AHAT_JAR := $(LOCAL_BUILT_MODULE)
+AHAT_API := $(intermediates.COMMON)/ahat_api.txt
+AHAT_REMOVED_API := $(intermediates.COMMON)/ahat_removed_api.txt
+
+# --- api check for ahat.jar ----------
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(call all-java-files-under, src/main)
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := JAVA_LIBRARIES
+LOCAL_MODULE := ahat
+LOCAL_DROIDDOC_OPTIONS := \
+ -stubpackages com.android.ahat:com.android.ahat.* \
+ -api $(AHAT_API) \
+ -removedApi $(AHAT_REMOVED_API)
+LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR := external/doclava/res/assets/templates-sdk
+include $(BUILD_DROIDDOC)
+$(AHAT_API): $(full_target)
+
+$(eval $(call check-api, \
+ ahat-check-api, \
+ $(LOCAL_PATH)/etc/ahat_api.txt, \
+ $(AHAT_API), \
+ $(LOCAL_PATH)/etc/ahat_removed_api.txt, \
+ $(AHAT_REMOVED_API), \
+ -error 2 -error 3 -error 4 -error 5 -error 6 -error 7 -error 8 -error 9 -error 10 -error 11 \
+ -error 12 -error 13 -error 14 -error 15 -error 16 -error 17 -error 18 -error 19 -error 20 \
+ -error 21 -error 23 -error 24 -error 25 -error 26 -error 27, \
+ cat $(LOCAL_PATH)/etc/ahat_api_msg.txt, \
+ $(AHAT_JAR),))
+
+.PHONY: ahat-update-api
+ahat-update-api: PRIVATE_AHAT_API := $(AHAT_API)
+ahat-update-api: PRIVATE_AHAT_REMOVED_API := $(AHAT_REMOVED_API)
+ahat-update-api: PRIVATE_AHAT_ETC_API := $(LOCAL_PATH)/etc/ahat_api.txt
+ahat-update-api: PRIVATE_AHAT_ETC_REMOVED_API := $(LOCAL_PATH)/etc/ahat_removed_api.txt
+ahat-update-api: ahat-docs
+ @echo Copying ahat_api.txt
+ cp $(PRIVATE_AHAT_API) $(PRIVATE_AHAT_ETC_API)
+ @echo Copying ahat_removed_api.txt
+ cp $(PRIVATE_AHAT_REMOVED_API) $(PRIVATE_AHAT_ETC_REMOVED_API)
# --- ahat script ----------------
include $(CLEAR_VARS)
@@ -62,6 +107,12 @@ AHAT_TEST_DUMP_HPROF := $(intermediates.COMMON)/test-dump.hprof
AHAT_TEST_DUMP_BASE_HPROF := $(intermediates.COMMON)/test-dump-base.hprof
AHAT_TEST_DUMP_PROGUARD_MAP := $(intermediates.COMMON)/test-dump.map
+# Directories to use for ANDROID_DATA when generating the test dumps to
+# ensure we don't pollute the source tree with any artifacts from running
+# dalvikvm.
+AHAT_TEST_DUMP_ANDROID_DATA := $(intermediates.COMMON)/test-dump-android_data
+AHAT_TEST_DUMP_BASE_ANDROID_DATA := $(intermediates.COMMON)/test-dump-base-android_data
+
# Generate the proguard map in the desired location by copying it from
# wherever the build system generates it by default.
$(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(proguard_dictionary)
@@ -70,20 +121,28 @@ $(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
- $(ART_HOST_EXECUTABLES) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm64 \
$(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
$(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA)
$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
- $(PRIVATE_AHAT_TEST_ART) -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+ rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
+ $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
+$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA)
$(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
- $(PRIVATE_AHAT_TEST_ART) -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+ rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
+ $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
@@ -111,10 +170,15 @@ endif # EMMA_INSTRUMENT
endif # linux
# Clean up local variables.
+AHAT_JAR :=
+AHAT_API :=
+AHAT_REMOVED_API :=
AHAT_TEST_JAR :=
AHAT_TEST_DUMP_JAR :=
AHAT_TEST_DUMP_HPROF :=
AHAT_TEST_DUMP_BASE_HPROF :=
AHAT_TEST_DUMP_PROGUARD_MAP :=
AHAT_TEST_DUMP_DEPENDENCIES :=
+AHAT_TEST_DUMP_ANDROID_DATA :=
+AHAT_TEST_DUMP_BASE_ANDROID_DATA :=
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
new file mode 100644
index 0000000000..7920adae55
--- /dev/null
+++ b/tools/ahat/etc/ahat_api.txt
@@ -0,0 +1,327 @@
+package com.android.ahat {
+
+ public class Main {
+ method public static void main(java.lang.String[]);
+ }
+
+}
+
+package com.android.ahat.dominators {
+
+ public class DominatorsComputation {
+ ctor public DominatorsComputation();
+ method public static void computeDominators(com.android.ahat.dominators.DominatorsComputation.Node);
+ }
+
+ public static abstract interface DominatorsComputation.Node {
+ method public abstract java.lang.Object getDominatorsComputationState();
+ method public abstract java.lang.Iterable<? extends com.android.ahat.dominators.DominatorsComputation.Node> getReferencesForDominators();
+ method public abstract void setDominator(com.android.ahat.dominators.DominatorsComputation.Node);
+ method public abstract void setDominatorsComputationState(java.lang.Object);
+ }
+
+}
+
+package com.android.ahat.heapdump {
+
+ public class AhatArrayInstance extends com.android.ahat.heapdump.AhatInstance {
+ method public int getLength();
+ method public com.android.ahat.heapdump.Value getValue(int);
+ method public java.util.List<com.android.ahat.heapdump.Value> getValues();
+ method public java.lang.String toString();
+ }
+
+ public class AhatClassInstance extends com.android.ahat.heapdump.AhatInstance {
+ method public java.lang.Iterable<com.android.ahat.heapdump.FieldValue> getInstanceFields();
+ method public java.lang.String toString();
+ }
+
+ public class AhatClassObj extends com.android.ahat.heapdump.AhatInstance {
+ method public com.android.ahat.heapdump.AhatInstance getClassLoader();
+ method public com.android.ahat.heapdump.Field[] getInstanceFields();
+ method public long getInstanceSize();
+ method public java.lang.String getName();
+ method public java.util.List<com.android.ahat.heapdump.FieldValue> getStaticFieldValues();
+ method public com.android.ahat.heapdump.AhatClassObj getSuperClassObj();
+ method public java.lang.String toString();
+ }
+
+ public class AhatHeap implements com.android.ahat.heapdump.Diffable {
+ method public com.android.ahat.heapdump.AhatHeap getBaseline();
+ method public java.lang.String getName();
+ method public com.android.ahat.heapdump.Size getSize();
+ method public boolean isPlaceHolder();
+ }
+
+ public abstract class AhatInstance implements com.android.ahat.heapdump.Diffable com.android.ahat.dominators.DominatorsComputation.Node {
+ method public com.android.ahat.heapdump.AhatArrayInstance asArrayInstance();
+ method public java.awt.image.BufferedImage asBitmap();
+ method public com.android.ahat.heapdump.AhatClassInstance asClassInstance();
+ method public com.android.ahat.heapdump.AhatClassObj asClassObj();
+ method public java.lang.String asString(int);
+ method public java.lang.String asString();
+ method public com.android.ahat.heapdump.AhatInstance getAssociatedBitmapInstance();
+ method public com.android.ahat.heapdump.AhatInstance getBaseline();
+ method public java.lang.String getClassName();
+ method public com.android.ahat.heapdump.AhatClassObj getClassObj();
+ method public java.lang.String getDexCacheLocation(int);
+ method public java.util.List<com.android.ahat.heapdump.AhatInstance> getDominated();
+ method public java.lang.Object getDominatorsComputationState();
+ method public com.android.ahat.heapdump.Value getField(java.lang.String);
+ method public java.util.List<com.android.ahat.heapdump.AhatInstance> getHardReverseReferences();
+ method public com.android.ahat.heapdump.AhatHeap getHeap();
+ method public long getId();
+ method public com.android.ahat.heapdump.AhatInstance getImmediateDominator();
+ method public java.util.List<com.android.ahat.heapdump.PathElement> getPathFromGcRoot();
+ method public com.android.ahat.heapdump.AhatInstance getRefField(java.lang.String);
+ method public java.lang.Iterable<? extends com.android.ahat.dominators.DominatorsComputation.Node> getReferencesForDominators();
+ method public com.android.ahat.heapdump.AhatInstance getReferent();
+ method public com.android.ahat.heapdump.Size getRetainedSize(com.android.ahat.heapdump.AhatHeap);
+ method public java.util.Collection<com.android.ahat.heapdump.RootType> getRootTypes();
+ method public com.android.ahat.heapdump.Site getSite();
+ method public com.android.ahat.heapdump.Size getSize();
+ method public java.util.List<com.android.ahat.heapdump.AhatInstance> getSoftReverseReferences();
+ method public com.android.ahat.heapdump.Size getTotalRetainedSize();
+ method public boolean isArrayInstance();
+ method public boolean isClassInstance();
+ method public boolean isClassObj();
+ method public boolean isPlaceHolder();
+ method public boolean isRoot();
+ method public boolean isStronglyReachable();
+ method public boolean isUnreachable();
+ method public boolean isWeaklyReachable();
+ method public void setDominator(com.android.ahat.dominators.DominatorsComputation.Node);
+ method public void setDominatorsComputationState(java.lang.Object);
+ method public abstract java.lang.String toString();
+ }
+
+ public class AhatSnapshot implements com.android.ahat.heapdump.Diffable {
+ method public com.android.ahat.heapdump.AhatClassObj findClassObj(long);
+ method public com.android.ahat.heapdump.AhatInstance findInstance(long);
+ method public com.android.ahat.heapdump.AhatSnapshot getBaseline();
+ method public com.android.ahat.heapdump.AhatHeap getHeap(java.lang.String);
+ method public java.util.List<com.android.ahat.heapdump.AhatHeap> getHeaps();
+ method public com.android.ahat.heapdump.Site getRootSite();
+ method public java.util.List<com.android.ahat.heapdump.AhatInstance> getRooted();
+ method public com.android.ahat.heapdump.Site getSite(long);
+ method public boolean isDiffed();
+ method public boolean isPlaceHolder();
+ }
+
+ public class Diff {
+ ctor public Diff();
+ method public static void snapshots(com.android.ahat.heapdump.AhatSnapshot, com.android.ahat.heapdump.AhatSnapshot);
+ }
+
+ public class DiffFields {
+ ctor public DiffFields();
+ method public static java.util.List<com.android.ahat.heapdump.DiffedFieldValue> diff(java.lang.Iterable<com.android.ahat.heapdump.FieldValue>, java.lang.Iterable<com.android.ahat.heapdump.FieldValue>);
+ }
+
+ public abstract interface Diffable<T> {
+ method public abstract T getBaseline();
+ method public abstract boolean isPlaceHolder();
+ }
+
+ public class DiffedFieldValue {
+ method public static com.android.ahat.heapdump.DiffedFieldValue added(com.android.ahat.heapdump.FieldValue);
+ method public static com.android.ahat.heapdump.DiffedFieldValue deleted(com.android.ahat.heapdump.FieldValue);
+ method public static com.android.ahat.heapdump.DiffedFieldValue matched(com.android.ahat.heapdump.FieldValue, com.android.ahat.heapdump.FieldValue);
+ field public final com.android.ahat.heapdump.Value baseline;
+ field public final com.android.ahat.heapdump.Value current;
+ field public final java.lang.String name;
+ field public final com.android.ahat.heapdump.DiffedFieldValue.Status status;
+ field public final com.android.ahat.heapdump.Type type;
+ }
+
+ public static final class DiffedFieldValue.Status extends java.lang.Enum {
+ method public static com.android.ahat.heapdump.DiffedFieldValue.Status valueOf(java.lang.String);
+ method public static final com.android.ahat.heapdump.DiffedFieldValue.Status[] values();
+ enum_constant public static final com.android.ahat.heapdump.DiffedFieldValue.Status ADDED;
+ enum_constant public static final com.android.ahat.heapdump.DiffedFieldValue.Status DELETED;
+ enum_constant public static final com.android.ahat.heapdump.DiffedFieldValue.Status MATCHED;
+ }
+
+ public class Field {
+ ctor public Field(java.lang.String, com.android.ahat.heapdump.Type);
+ field public final java.lang.String name;
+ field public final com.android.ahat.heapdump.Type type;
+ }
+
+ public class FieldValue {
+ ctor public FieldValue(java.lang.String, com.android.ahat.heapdump.Type, com.android.ahat.heapdump.Value);
+ field public final java.lang.String name;
+ field public final com.android.ahat.heapdump.Type type;
+ field public final com.android.ahat.heapdump.Value value;
+ }
+
+ public class HprofFormatException extends java.lang.Exception {
+ }
+
+ public class Parser {
+ ctor public Parser();
+ method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.io.File, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
+ method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.nio.ByteBuffer, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
+ }
+
+ public class PathElement implements com.android.ahat.heapdump.Diffable {
+ ctor public PathElement(com.android.ahat.heapdump.AhatInstance, java.lang.String);
+ method public com.android.ahat.heapdump.PathElement getBaseline();
+ method public boolean isPlaceHolder();
+ field public final java.lang.String field;
+ field public final com.android.ahat.heapdump.AhatInstance instance;
+ field public boolean isDominator;
+ }
+
+ public final class RootType extends java.lang.Enum {
+ method public static com.android.ahat.heapdump.RootType valueOf(java.lang.String);
+ method public static final com.android.ahat.heapdump.RootType[] values();
+ enum_constant public static final com.android.ahat.heapdump.RootType DEBUGGER;
+ enum_constant public static final com.android.ahat.heapdump.RootType FINALIZING;
+ enum_constant public static final com.android.ahat.heapdump.RootType INTERNED_STRING;
+ enum_constant public static final com.android.ahat.heapdump.RootType JAVA_FRAME;
+ enum_constant public static final com.android.ahat.heapdump.RootType JNI_GLOBAL;
+ enum_constant public static final com.android.ahat.heapdump.RootType JNI_LOCAL;
+ enum_constant public static final com.android.ahat.heapdump.RootType JNI_MONITOR;
+ enum_constant public static final com.android.ahat.heapdump.RootType MONITOR;
+ enum_constant public static final com.android.ahat.heapdump.RootType NATIVE_STACK;
+ enum_constant public static final com.android.ahat.heapdump.RootType STICKY_CLASS;
+ enum_constant public static final com.android.ahat.heapdump.RootType THREAD;
+ enum_constant public static final com.android.ahat.heapdump.RootType THREAD_BLOCK;
+ enum_constant public static final com.android.ahat.heapdump.RootType UNKNOWN;
+ enum_constant public static final com.android.ahat.heapdump.RootType VM_INTERNAL;
+ }
+
+ public class Site implements com.android.ahat.heapdump.Diffable {
+ method public com.android.ahat.heapdump.Site findSite(long);
+ method public com.android.ahat.heapdump.Site getBaseline();
+ method public java.util.List<com.android.ahat.heapdump.Site> getChildren();
+ method public java.lang.String getFilename();
+ method public long getId();
+ method public int getLineNumber();
+ method public java.lang.String getMethodName();
+ method public void getObjects(java.lang.String, java.lang.String, java.util.Collection<com.android.ahat.heapdump.AhatInstance>);
+ method public java.util.List<com.android.ahat.heapdump.Site.ObjectsInfo> getObjectsInfos();
+ method public com.android.ahat.heapdump.Site getParent();
+ method public java.lang.String getSignature();
+ method public com.android.ahat.heapdump.Size getSize(com.android.ahat.heapdump.AhatHeap);
+ method public com.android.ahat.heapdump.Size getTotalSize();
+ method public boolean isPlaceHolder();
+ }
+
+ public static class Site.ObjectsInfo implements com.android.ahat.heapdump.Diffable {
+ ctor public Site.ObjectsInfo(com.android.ahat.heapdump.AhatHeap, com.android.ahat.heapdump.AhatClassObj);
+ method public com.android.ahat.heapdump.Site.ObjectsInfo getBaseline();
+ method public java.lang.String getClassName();
+ method public boolean isPlaceHolder();
+ method public void setBaseline(com.android.ahat.heapdump.Site.ObjectsInfo);
+ field public com.android.ahat.heapdump.AhatClassObj classObj;
+ field public com.android.ahat.heapdump.AhatHeap heap;
+ field public com.android.ahat.heapdump.Size numBytes;
+ field public long numInstances;
+ }
+
+ public class Size {
+ ctor public Size(long, long);
+ method public long getJavaSize();
+ method public long getRegisteredNativeSize();
+ method public long getSize();
+ method public boolean isZero();
+ method public com.android.ahat.heapdump.Size plus(com.android.ahat.heapdump.Size);
+ method public com.android.ahat.heapdump.Size plusRegisteredNativeSize(long);
+ field public static com.android.ahat.heapdump.Size ZERO;
+ }
+
+ public class Sort {
+ ctor public Sort();
+ method public static java.util.Comparator<com.android.ahat.heapdump.AhatInstance> defaultInstanceCompare(com.android.ahat.heapdump.AhatSnapshot);
+ method public static java.util.Comparator<com.android.ahat.heapdump.Site> defaultSiteCompare(com.android.ahat.heapdump.AhatSnapshot);
+ field public static final java.util.Comparator<com.android.ahat.heapdump.FieldValue> FIELD_VALUE_BY_NAME;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.FieldValue> FIELD_VALUE_BY_TYPE;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.AhatInstance> INSTANCE_BY_TOTAL_RETAINED_SIZE;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.Site.ObjectsInfo> OBJECTS_INFO_BY_CLASS_NAME;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.Site.ObjectsInfo> OBJECTS_INFO_BY_HEAP_NAME;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.Site.ObjectsInfo> OBJECTS_INFO_BY_SIZE;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.Site> SITE_BY_TOTAL_SIZE;
+ field public static final java.util.Comparator<com.android.ahat.heapdump.Size> SIZE_BY_SIZE;
+ }
+
+ public static class Sort.InstanceByHeapRetainedSize implements java.util.Comparator {
+ ctor public Sort.InstanceByHeapRetainedSize(com.android.ahat.heapdump.AhatHeap);
+ method public int compare(com.android.ahat.heapdump.AhatInstance, com.android.ahat.heapdump.AhatInstance);
+ }
+
+ public static class Sort.SiteByHeapSize implements java.util.Comparator {
+ ctor public Sort.SiteByHeapSize(com.android.ahat.heapdump.AhatHeap);
+ method public int compare(com.android.ahat.heapdump.Site, com.android.ahat.heapdump.Site);
+ }
+
+ public static class Sort.WithPriority<T> implements java.util.Comparator {
+ ctor public Sort.WithPriority(java.util.Comparator<T>...);
+ ctor public Sort.WithPriority(java.util.List<java.util.Comparator<T>>);
+ method public int compare(T, T);
+ }
+
+ public final class Type extends java.lang.Enum {
+ method public static com.android.ahat.heapdump.Type valueOf(java.lang.String);
+ method public static final com.android.ahat.heapdump.Type[] values();
+ enum_constant public static final com.android.ahat.heapdump.Type BOOLEAN;
+ enum_constant public static final com.android.ahat.heapdump.Type BYTE;
+ enum_constant public static final com.android.ahat.heapdump.Type CHAR;
+ enum_constant public static final com.android.ahat.heapdump.Type DOUBLE;
+ enum_constant public static final com.android.ahat.heapdump.Type FLOAT;
+ enum_constant public static final com.android.ahat.heapdump.Type INT;
+ enum_constant public static final com.android.ahat.heapdump.Type LONG;
+ enum_constant public static final com.android.ahat.heapdump.Type OBJECT;
+ enum_constant public static final com.android.ahat.heapdump.Type SHORT;
+ field public final java.lang.String name;
+ }
+
+ public abstract class Value {
+ ctor public Value();
+ method public com.android.ahat.heapdump.AhatInstance asAhatInstance();
+ method public java.lang.Byte asByte();
+ method public java.lang.Character asChar();
+ method public java.lang.Integer asInteger();
+ method public java.lang.Long asLong();
+ method public abstract boolean equals(java.lang.Object);
+ method public com.android.ahat.heapdump.Value getBaseline();
+ method public static com.android.ahat.heapdump.Value getBaseline(com.android.ahat.heapdump.Value);
+ method public static com.android.ahat.heapdump.Type getType(com.android.ahat.heapdump.Value);
+ method public boolean isAhatInstance();
+ method public boolean isInteger();
+ method public boolean isLong();
+ method public static com.android.ahat.heapdump.Value pack(com.android.ahat.heapdump.AhatInstance);
+ method public static com.android.ahat.heapdump.Value pack(boolean);
+ method public static com.android.ahat.heapdump.Value pack(char);
+ method public static com.android.ahat.heapdump.Value pack(float);
+ method public static com.android.ahat.heapdump.Value pack(double);
+ method public static com.android.ahat.heapdump.Value pack(byte);
+ method public static com.android.ahat.heapdump.Value pack(short);
+ method public static com.android.ahat.heapdump.Value pack(int);
+ method public static com.android.ahat.heapdump.Value pack(long);
+ method public abstract java.lang.String toString();
+ }
+
+}
+
+package com.android.ahat.proguard {
+
+ public class ProguardMap {
+ ctor public ProguardMap();
+ method public java.lang.String getClassName(java.lang.String);
+ method public java.lang.String getFieldName(java.lang.String, java.lang.String);
+ method public com.android.ahat.proguard.ProguardMap.Frame getFrame(java.lang.String, java.lang.String, java.lang.String, java.lang.String, int);
+ method public void readFromFile(java.io.File) throws java.io.FileNotFoundException, java.io.IOException, java.text.ParseException;
+ method public void readFromReader(java.io.Reader) throws java.io.IOException, java.text.ParseException;
+ }
+
+ public static class ProguardMap.Frame {
+ field public final java.lang.String filename;
+ field public final int line;
+ field public final java.lang.String method;
+ field public final java.lang.String signature;
+ }
+
+}
+
diff --git a/tools/ahat/etc/ahat_api_msg.txt b/tools/ahat/etc/ahat_api_msg.txt
new file mode 100644
index 0000000000..d0d04685d6
--- /dev/null
+++ b/tools/ahat/etc/ahat_api_msg.txt
@@ -0,0 +1,5 @@
+The public API for ahat.jar has changed.
+
+Please verify whether this change to the API is intentional and
+whether it may break any current users of the API. If the API change
+is intentional, run 'm ahat-update-api' to update the recorded API.
diff --git a/tools/ahat/etc/ahat_removed_api.txt b/tools/ahat/etc/ahat_removed_api.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tools/ahat/etc/ahat_removed_api.txt
diff --git a/tools/ahat/src/main/com/android/ahat/HtmlDoc.java b/tools/ahat/src/main/com/android/ahat/HtmlDoc.java
index 5a22fc75fe..d5106dc1dd 100644
--- a/tools/ahat/src/main/com/android/ahat/HtmlDoc.java
+++ b/tools/ahat/src/main/com/android/ahat/HtmlDoc.java
@@ -23,7 +23,7 @@ import java.util.List;
/**
* An Html implementation of Doc.
*/
-public class HtmlDoc implements Doc {
+class HtmlDoc implements Doc {
private PrintStream ps;
private Column[] mCurrentTableColumns;
diff --git a/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java b/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java
index 75a68277d3..06ffca2792 100644
--- a/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java
+++ b/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java
@@ -16,7 +16,7 @@
package com.android.ahat;
-public class HtmlEscaper {
+class HtmlEscaper {
/**
* Escape html characters in the input string.
*/
diff --git a/tools/ahat/src/main/com/android/ahat/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
index a0fbf777dd..048573e915 100644
--- a/tools/ahat/src/main/com/android/ahat/Main.java
+++ b/tools/ahat/src/main/com/android/ahat/Main.java
@@ -31,8 +31,10 @@ import java.text.ParseException;
import java.util.concurrent.Executors;
public class Main {
+ private Main() {
+ }
- public static void help(PrintStream out) {
+ private static void help(PrintStream out) {
out.println("java -jar ahat.jar [OPTIONS] FILE");
out.println(" Launch an http server for viewing the given Android heap dump FILE.");
out.println("");
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
index 50a4805bed..ccdd6e4df7 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
@@ -34,7 +34,7 @@ public class AhatArrayInstance extends AhatInstance {
private byte[] mByteArray; // null if not a byte array.
private char[] mCharArray; // null if not a char array.
- public AhatArrayInstance(long id) {
+ AhatArrayInstance(long id) {
super(id);
}
@@ -176,7 +176,7 @@ public class AhatArrayInstance extends AhatInstance {
}
@Override
- protected long getExtraJavaSize() {
+ long getExtraJavaSize() {
int length = getLength();
if (length == 0) {
return 0;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
index 94efa5049f..cb9d959508 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
@@ -27,7 +27,7 @@ public class AhatClassInstance extends AhatInstance {
// the field types and names to save memory.
private Value[] mFields;
- public AhatClassInstance(long id) {
+ AhatClassInstance(long id) {
super(id);
}
@@ -36,7 +36,7 @@ public class AhatClassInstance extends AhatInstance {
}
@Override
- protected long getExtraJavaSize() {
+ long getExtraJavaSize() {
return 0;
}
@@ -244,7 +244,7 @@ public class AhatClassInstance extends AhatInstance {
}
@Override
- public RegisteredNativeAllocation asRegisteredNativeAllocation() {
+ RegisteredNativeAllocation asRegisteredNativeAllocation() {
if (!isInstanceOfClass("sun.misc.Cleaner")) {
return null;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
index be0f71306e..3babf76842 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
@@ -29,7 +29,7 @@ public class AhatClassObj extends AhatInstance {
private long mStaticFieldsSize;
private long mInstanceSize;
- public AhatClassObj(long id, String className) {
+ AhatClassObj(long id, String className) {
super(id);
mClassName = className;
}
@@ -50,7 +50,7 @@ public class AhatClassObj extends AhatInstance {
}
@Override
- protected long getExtraJavaSize() {
+ long getExtraJavaSize() {
return mStaticFieldsSize;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index cb2d738f23..a9f819f710 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -64,7 +64,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
// 2. During dominators computation, to store the dominators computation state.
private Object mTemporaryUserData;
- public AhatInstance(long id) {
+ AhatInstance(long id) {
mId = id;
mBaseline = this;
}
@@ -101,7 +101,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* For example, class objects will have extra size for static fields and
* array objects will have extra size for the array elements.
*/
- protected abstract long getExtraJavaSize();
+ abstract long getExtraJavaSize();
/**
* Returns the number of bytes belonging to the given heap that this instance
@@ -388,7 +388,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
return null;
}
- public static class RegisteredNativeAllocation {
+ static class RegisteredNativeAllocation {
public AhatInstance referent;
public long size;
};
@@ -397,7 +397,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* Return the registered native allocation that this instance represents, if
* any. This is relevant for instances of sun.misc.Cleaner.
*/
- public RegisteredNativeAllocation asRegisteredNativeAllocation() {
+ RegisteredNativeAllocation asRegisteredNativeAllocation() {
return null;
}
@@ -428,8 +428,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* Returns null if the given instance has no next instance to the gc root.
*/
private static PathElement getNextPathElementToGcRoot(AhatInstance inst) {
- AhatInstance parent = inst.mNextInstanceToGcRoot;
- if (parent == null) {
+ if (inst.isRoot()) {
return null;
}
return new PathElement(inst.mNextInstanceToGcRoot, inst.mNextInstanceToGcRootField);
@@ -452,7 +451,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
return null;
}
- public void setBaseline(AhatInstance baseline) {
+ void setBaseline(AhatInstance baseline) {
mBaseline = baseline;
}
@@ -471,11 +470,11 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
return new AhatPlaceHolderInstance(this);
}
- public void setTemporaryUserData(Object state) {
+ void setTemporaryUserData(Object state) {
mTemporaryUserData = state;
}
- public Object getTemporaryUserData() {
+ Object getTemporaryUserData() {
return mTemporaryUserData;
}
@@ -487,40 +486,64 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* mHardReverseReferences
* mSoftReverseReferences
*/
- static void computeReverseReferences(AhatInstance root) {
- // Do a breadth first search to visit the nodes.
- Queue<Reference> bfs = new ArrayDeque<Reference>();
+ static void computeReverseReferences(SuperRoot root) {
+ // Start by doing a breadth first search through strong references.
+ // Then continue the breadth first search through weak references.
+ Queue<Reference> strong = new ArrayDeque<Reference>();
+ Queue<Reference> weak = new ArrayDeque<Reference>();
+
for (Reference ref : root.getReferences()) {
- bfs.add(ref);
+ strong.add(ref);
}
- while (!bfs.isEmpty()) {
- Reference ref = bfs.poll();
- if (ref.ref.mHardReverseReferences == null && ref.strong) {
- // This is the first time we are seeing ref.ref through a strong
- // reference.
+ while (!strong.isEmpty()) {
+ Reference ref = strong.poll();
+ assert ref.strong;
+
+ if (ref.ref.mNextInstanceToGcRoot == null) {
+ // This is the first time we have seen ref.ref.
ref.ref.mNextInstanceToGcRoot = ref.src;
ref.ref.mNextInstanceToGcRootField = ref.field;
ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
+
for (Reference childRef : ref.ref.getReferences()) {
- bfs.add(childRef);
+ if (childRef.strong) {
+ strong.add(childRef);
+ } else {
+ weak.add(childRef);
+ }
}
}
- // Note: ref.src is null when the src is the SuperRoot.
- if (ref.src != null) {
- if (ref.strong) {
- ref.ref.mHardReverseReferences.add(ref.src);
- } else {
- if (ref.ref.mSoftReverseReferences == null) {
- ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
- if (ref.ref.mNextInstanceToGcRoot == null) {
- ref.ref.mNextInstanceToGcRoot = ref.src;
- ref.ref.mNextInstanceToGcRootField = ref.field;
- }
- }
- ref.ref.mSoftReverseReferences.add(ref.src);
+ // Note: We specifically exclude 'root' from the reverse references
+ // because it is a fake SuperRoot instance not present in the original
+ // heap dump.
+ if (ref.src != root) {
+ ref.ref.mHardReverseReferences.add(ref.src);
+ }
+ }
+
+ while (!weak.isEmpty()) {
+ Reference ref = weak.poll();
+
+ if (ref.ref.mNextInstanceToGcRoot == null) {
+ // This is the first time we have seen ref.ref.
+ ref.ref.mNextInstanceToGcRoot = ref.src;
+ ref.ref.mNextInstanceToGcRootField = ref.field;
+ ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
+
+ for (Reference childRef : ref.ref.getReferences()) {
+ weak.add(childRef);
+ }
+ }
+
+ if (ref.strong) {
+ ref.ref.mHardReverseReferences.add(ref.src);
+ } else {
+ if (ref.ref.mSoftReverseReferences == null) {
+ ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
}
+ ref.ref.mSoftReverseReferences.add(ref.src);
}
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java
index 07f5b50012..b8cdbddaec 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java
@@ -22,7 +22,7 @@ package com.android.ahat.heapdump;
*
* This should be created through a call to newPlaceHolder();
*/
-public class AhatPlaceHolderClassObj extends AhatClassObj {
+class AhatPlaceHolderClassObj extends AhatClassObj {
AhatPlaceHolderClassObj(AhatClassObj baseline) {
super(-1, baseline.getClassName());
setBaseline(baseline);
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java
index 884940370d..d65642561c 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java
@@ -25,7 +25,7 @@ import java.util.List;
*
* This should be created through a call to AhatInstance.newPlaceHolder();
*/
-public class AhatPlaceHolderInstance extends AhatInstance {
+class AhatPlaceHolderInstance extends AhatInstance {
AhatPlaceHolderInstance(AhatInstance baseline) {
super(-1);
setBaseline(baseline);
@@ -36,7 +36,7 @@ public class AhatPlaceHolderInstance extends AhatInstance {
return Size.ZERO;
}
- @Override protected long getExtraJavaSize() {
+ @Override long getExtraJavaSize() {
return 0;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
index 945966cec7..59ce5d1c6c 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
@@ -122,7 +122,7 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
return site == null ? mRootSite : site;
}
- public void setBaseline(AhatSnapshot baseline) {
+ void setBaseline(AhatSnapshot baseline) {
mBaseline = baseline;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java b/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java
index 0e128cd50a..256a3b46f6 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java
@@ -17,11 +17,11 @@
package com.android.ahat.heapdump;
public class HprofFormatException extends Exception {
- public HprofFormatException(String msg) {
+ HprofFormatException(String msg) {
super(msg);
}
- public HprofFormatException(String msg, Exception cause) {
+ HprofFormatException(String msg, Exception cause) {
super(msg, cause);
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
index 756b7d2554..d7b1dd78d6 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
@@ -630,7 +630,7 @@ public class Parser {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.OBJECT;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
index 980f2780b6..f1340bd07b 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
@@ -23,7 +23,7 @@ package com.android.ahat.heapdump;
* 'strong' is true if this is a strong reference, false if it is a
* weak/soft/other reference.
*/
-public class Reference {
+class Reference {
public final AhatInstance src;
public final String field;
public final AhatInstance ref;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java b/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java
index af552ea2c9..734f889af6 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java
@@ -32,7 +32,7 @@ public enum RootType {
JNI_MONITOR (1 << 12),
FINALIZING (1 << 13);
- public final int mask;
+ final int mask;
RootType(int mask) {
this.mask = mask;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
index 523550ad2c..4978d52830 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
@@ -103,7 +103,7 @@ public class Site implements Diffable<Site> {
/**
* Construct a root site.
*/
- public Site(String name) {
+ Site(String name) {
this(null, name, "", "", 0);
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
index a2adbd2808..b01cffff72 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
@@ -21,20 +21,20 @@ import java.util.AbstractList;
import java.util.ArrayList;
import java.util.List;
-public class SuperRoot extends AhatInstance implements DominatorsComputation.Node {
+class SuperRoot extends AhatInstance implements DominatorsComputation.Node {
private List<AhatInstance> mRoots = new ArrayList<AhatInstance>();
private Object mDominatorsComputationState;
- public SuperRoot() {
+ SuperRoot() {
super(0);
}
- public void addRoot(AhatInstance root) {
+ void addRoot(AhatInstance root) {
mRoots.add(root);
}
@Override
- protected long getExtraJavaSize() {
+ long getExtraJavaSize() {
return 0;
}
@@ -54,7 +54,7 @@ public class SuperRoot extends AhatInstance implements DominatorsComputation.Nod
@Override
public Reference get(int index) {
String field = ".roots[" + Integer.toString(index) + "]";
- return new Reference(null, field, mRoots.get(index), true);
+ return new Reference(SuperRoot.this, field, mRoots.get(index), true);
}
};
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Type.java b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java
index 726bc47cf2..40249615a2 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Type.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java
@@ -28,7 +28,7 @@ public enum Type {
LONG("long", 8);
public final String name;
- public final int size;
+ final int size;
Type(String name, int size) {
this.name = name;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
index 01fd25057d..eea427774b 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
@@ -67,7 +67,7 @@ public abstract class Value {
/**
* Return the type of the given value.
*/
- protected abstract Type getType();
+ abstract Type getType();
/**
* Returns true if the Value is an AhatInstance, as opposed to a Java
@@ -153,7 +153,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.BOOLEAN;
}
@@ -184,7 +184,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.BYTE;
}
@@ -215,7 +215,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.CHAR;
}
@@ -241,7 +241,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.DOUBLE;
}
@@ -267,7 +267,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.FLOAT;
}
@@ -304,7 +304,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.OBJECT;
}
@@ -345,7 +345,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.INT;
}
@@ -381,7 +381,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.LONG;
}
@@ -407,7 +407,7 @@ public abstract class Value {
}
@Override
- protected Type getType() {
+ Type getType() {
return Type.SHORT;
}
diff --git a/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
index 50c110aad4..131bbf3cf6 100644
--- a/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
+++ b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
@@ -101,7 +101,7 @@ public class ProguardMap {
private Map<String, ClassData> mClassesFromObfuscatedName = new HashMap<String, ClassData>();
public static class Frame {
- public Frame(String method, String signature, String filename, int line) {
+ Frame(String method, String signature, String filename, int line) {
this.method = method;
this.signature = signature;
this.filename = filename;
diff --git a/tools/ahat/src/test-dump/Main.java b/tools/ahat/src/test-dump/Main.java
index 333d28c214..079be7da81 100644
--- a/tools/ahat/src/test-dump/Main.java
+++ b/tools/ahat/src/test-dump/Main.java
@@ -93,6 +93,8 @@ public class Main {
null};
public Reference aLongStrongPathToSamplePathObject;
public WeakReference aShortWeakPathToSamplePathObject;
+ public WeakReference aWeakRefToGcRoot = new WeakReference(Main.class);
+ public SoftReference aWeakChain = new SoftReference(new Reference(new Reference(new Object())));
public Object[] basicStringRef;
public AddedObject addedObject;
public UnchangedObject unchangedObject = new UnchangedObject();
@@ -126,10 +128,11 @@ public class Main {
Main.class.getClassLoader(), 0x12345, 50000);
registry.registerNativeAllocation(anObject, 0xABCDABCD);
- aLongStrongPathToSamplePathObject = new Reference(new Reference(new Object()));
- aShortWeakPathToSamplePathObject = new WeakReference(
- ((Reference)aLongStrongPathToSamplePathObject.referent).referent,
- referenceQueue);
+ {
+ Object object = new Object();
+ aLongStrongPathToSamplePathObject = new Reference(new Reference(new Reference(object)));
+ aShortWeakPathToSamplePathObject = new WeakReference(new Reference(object));
+ }
addedObject = baseline ? null : new AddedObject();
removedObject = baseline ? new RemovedObject() : null;
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index a4908fd0ab..8fbb8849f0 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -274,12 +274,20 @@ public class InstanceTest {
public void gcRootPathNotWeak() throws IOException {
TestDump dump = TestDump.getTestDump();
- AhatInstance strong = dump.getDumpedAhatInstance("aLongStrongPathToSamplePathObject");
- AhatInstance strong2 = strong.getField("referent").asAhatInstance();
- AhatInstance object = strong2.getField("referent").asAhatInstance();
+ // The test dump is set up to have the following graph:
+ // -S-> strong1 -S-> strong2 -S-> strong3 -S-> object
+ // -S-> weak1 -W-> weak2 ------------------S->-/
+ // The gc root path should go through the longer chain of strong
+ // references, not the shorter chain with weak references (even though the
+ // last element in the shorter chain is a strong reference).
+
+ AhatInstance strong1 = dump.getDumpedAhatInstance("aLongStrongPathToSamplePathObject");
+ AhatInstance strong2 = strong1.getField("referent").asAhatInstance();
+ AhatInstance strong3 = strong2.getField("referent").asAhatInstance();
+ AhatInstance object = strong3.getField("referent").asAhatInstance();
List<PathElement> path = object.getPathFromGcRoot();
- assertEquals(strong2, path.get(path.size() - 2).instance);
+ assertEquals(strong3, path.get(path.size() - 2).instance);
}
@Test
@@ -368,6 +376,39 @@ public class InstanceTest {
}
@Test
+ public void weakRefToGcRoot() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ AhatInstance ref = dump.getDumpedAhatInstance("aWeakRefToGcRoot");
+
+ // The weak reference points to Main.class, which we expect will be marked
+ // as a GC root. In theory Main.class doesn't have to be a GC root, in
+ // which case this test case will need to be revised.
+ AhatInstance root = ref.getField("referent").asAhatInstance();
+ assertTrue(root.isRoot());
+
+ // We had a bug in the past where weak references to GC roots caused the
+ // roots to be incorrectly considered weakly reachable.
+ assertTrue(root.isStronglyReachable());
+ assertFalse(root.isWeaklyReachable());
+ }
+
+ @Test
+ public void weakReferenceChain() throws IOException {
+ // If the only reference to a chain of strongly referenced objects is a
+ // weak reference, then all of the objects should be considered weakly
+ // reachable.
+ TestDump dump = TestDump.getTestDump();
+ AhatInstance ref = dump.getDumpedAhatInstance("aWeakChain");
+ AhatInstance weak1 = ref.getField("referent").asAhatInstance();
+ AhatInstance weak2 = weak1.getField("referent").asAhatInstance();
+ AhatInstance weak3 = weak2.getField("referent").asAhatInstance();
+ assertTrue(ref.isStronglyReachable());
+ assertTrue(weak1.isWeaklyReachable());
+ assertTrue(weak2.isWeaklyReachable());
+ assertTrue(weak3.isWeaklyReachable());
+ }
+
+ @Test
public void reverseReferences() throws IOException {
TestDump dump = TestDump.getTestDump();
AhatInstance obj = dump.getDumpedAhatInstance("anObject");
diff --git a/tools/breakpoint-logger/Android.bp b/tools/breakpoint-logger/Android.bp
new file mode 100644
index 0000000000..67b423abf1
--- /dev/null
+++ b/tools/breakpoint-logger/Android.bp
@@ -0,0 +1,66 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+ name: "breakpointlogger-defaults",
+ host_supported: true,
+ srcs: ["breakpoint_logger.cc"],
+ defaults: ["art_defaults"],
+
+ // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+ // to be same ISA as what it is attached to.
+ compile_multilib: "both",
+
+ shared_libs: [
+ "libbase",
+ ],
+ target: {
+ android: {
+ },
+ host: {
+ },
+ },
+ header_libs: [
+ "libopenjdkjvmti_headers",
+ ],
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+ symlink_preferred_arch: true,
+}
+
+art_cc_library {
+ name: "libbreakpointlogger",
+ defaults: ["breakpointlogger-defaults"],
+ shared_libs: [
+ ],
+}
+
+art_cc_library {
+ name: "libbreakpointloggerd",
+ defaults: [
+ "art_debug_defaults",
+ "breakpointlogger-defaults",
+ ],
+ shared_libs: [],
+}
diff --git a/tools/breakpoint-logger/README.md b/tools/breakpoint-logger/README.md
new file mode 100644
index 0000000000..d7ffb3440f
--- /dev/null
+++ b/tools/breakpoint-logger/README.md
@@ -0,0 +1,54 @@
+# breakpointlogger
+
+breakpointlogger is a JVMTI agent that lets one set breakpoints that are logged
+when they are hit.
+
+# Usage
+### Build
+> `make libbreakpointlogger` # or 'make libbreakpointloggerd' with debugging checks enabled
+
+The libraries will be built for 32-bit, 64-bit, host and target. Below examples
+assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+> `:class_descriptor:->:methodName::method_sig:@:breakpoint_location:,[...]`
+
+* The breakpoint\_location is a number that's a valid jlocation for the runtime
+ being used. On ART this is a dex-pc. Dex-pcs can be found using tools such as
+ dexdump and are uint16\_t-offsets from the start of the method. On other
+ runtimes jlocations might represent other things.
+
+* Multiple breakpoints can be included in the options, separated with ','s.
+
+* Unlike with most normal debuggers the agent will load the class immediately to
+ set the breakpoint. This means that classes might be initialized earlier than
+ one might expect. This also means that one cannot set breakpoints on classes
+ that cannot be found using the standard or bootstrap classloader at startup.
+
+* Deviating from this format or including a breakpoint that cannot be found at
+ startup will cause the runtime to abort.
+
+#### ART
+> `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libbreakpointlogger.so=Lclass/Name;->methodName()V@0' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+#### RI
+> `java '-agentpath:libbreakpointlogger.so=Lclass/Name;->methodName()V@0' -cp tmp/helloworld/classes helloworld`
+
+### Output
+A normal run will look something like this:
+
+ % ./test/run-test --host --dev --with-agent 'libbreakpointlogger.so=LMain;->main([Ljava/lang/String;)V@0' 001-HelloWorld
+ <normal output removed>
+ dalvikvm32 W 10-25 10:39:09 18063 18063 breakpointlogger.cc:277] Breakpoint at location: 0x00000000 in method LMain;->main([Ljava/lang/String;)V (source: Main.java:13) thread: main
+ Hello, world!
+
+ % ./test/run-test --jvm --dev --with-agent 'libbreakpointlogger.so=LMain;->main([Ljava/lang/String;)V@0' 001-HelloWorld
+ <normal output removed>
+ java W 10-25 10:39:09 18063 18063 breakpointlogger.cc:277] Breakpoint at location: 0x00000000 in method LMain;->main([Ljava/lang/String;)V (source: Main.java:13) thread: main
+ Hello, world!
diff --git a/tools/breakpoint-logger/breakpoint_logger.cc b/tools/breakpoint-logger/breakpoint_logger.cc
new file mode 100644
index 0000000000..b48a1788e3
--- /dev/null
+++ b/tools/breakpoint-logger/breakpoint_logger.cc
@@ -0,0 +1,447 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <atomic>
+#include <iostream>
+#include <iomanip>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace breakpoint_logger {
+
+struct SingleBreakpointTarget {
+ std::string class_name;
+ std::string method_name;
+ std::string method_sig;
+ jlocation location;
+};
+
+struct BreakpointTargets {
+ std::vector<SingleBreakpointTarget> bps;
+};
+
+static void VMInitCB(jvmtiEnv* jvmti, JNIEnv* env, jthread thr ATTRIBUTE_UNUSED) {
+ BreakpointTargets* all_targets = nullptr;
+ jvmtiError err = jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&all_targets));
+ if (err != JVMTI_ERROR_NONE || all_targets == nullptr) {
+ env->FatalError("unable to get breakpoint targets");
+ }
+ for (const SingleBreakpointTarget& target : all_targets->bps) {
+ jclass k = env->FindClass(target.class_name.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe();
+ env->FatalError("Could not find class!");
+ return;
+ }
+ jmethodID m = env->GetMethodID(k, target.method_name.c_str(), target.method_sig.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionClear();
+ m = env->GetStaticMethodID(k, target.method_name.c_str(), target.method_sig.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe();
+ env->FatalError("Could not find method!");
+ return;
+ }
+ }
+ err = jvmti->SetBreakpoint(m, target.location);
+ if (err != JVMTI_ERROR_NONE) {
+ env->FatalError("unable to set breakpoint");
+ return;
+ }
+ env->DeleteLocalRef(k);
+ }
+}
+
+class ScopedThreadInfo {
+ public:
+ ScopedThreadInfo(jvmtiEnv* jvmti_env, JNIEnv* env, jthread thread)
+ : jvmti_env_(jvmti_env), env_(env), free_name_(false) {
+ memset(&info_, 0, sizeof(info_));
+ if (thread == nullptr) {
+ info_.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmti_env->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+ info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+ } else {
+ free_name_ = true;
+ }
+ }
+
+ ~ScopedThreadInfo() {
+ if (free_name_) {
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+ }
+ env_->DeleteLocalRef(info_.thread_group);
+ env_->DeleteLocalRef(info_.context_class_loader);
+ }
+
+ const char* GetName() const {
+ return info_.name;
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ JNIEnv* env_;
+ bool free_name_;
+ jvmtiThreadInfo info_;
+};
+
+class ScopedClassInfo {
+ public:
+ ScopedClassInfo(jvmtiEnv* jvmti_env, jclass c)
+ : jvmti_env_(jvmti_env),
+ class_(c),
+ name_(nullptr),
+ generic_(nullptr),
+ file_(nullptr),
+ debug_ext_(nullptr) {}
+
+ ~ScopedClassInfo() {
+ if (class_ != nullptr) {
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+ }
+ }
+
+ bool Init() {
+ if (class_ == nullptr) {
+ name_ = const_cast<char*>("<NONE>");
+ generic_ = const_cast<char*>("<NONE>");
+ return true;
+ } else {
+ jvmtiError ret1 = jvmti_env_->GetSourceFileName(class_, &file_);
+ jvmtiError ret2 = jvmti_env_->GetSourceDebugExtension(class_, &debug_ext_);
+ return jvmti_env_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+ ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret1 != JVMTI_ERROR_INVALID_CLASS &&
+ ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret2 != JVMTI_ERROR_INVALID_CLASS;
+ }
+ }
+
+ jclass GetClass() const {
+ return class_;
+ }
+ const char* GetName() const {
+ return name_;
+ }
+ // Generic type parameters, whatever is in the <> for a class
+ const char* GetGeneric() const {
+ return generic_;
+ }
+ const char* GetSourceDebugExtension() const {
+ if (debug_ext_ == nullptr) {
+ return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+ } else {
+ return debug_ext_;
+ }
+ }
+ const char* GetSourceFileName() const {
+ if (file_ == nullptr) {
+ return "<UNKNOWN_FILE>";
+ } else {
+ return file_;
+ }
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ jclass class_;
+ char* name_;
+ char* generic_;
+ char* file_;
+ char* debug_ext_;
+};
+
+class ScopedMethodInfo {
+ public:
+ ScopedMethodInfo(jvmtiEnv* jvmti_env, JNIEnv* env, jmethodID method)
+ : jvmti_env_(jvmti_env),
+ env_(env),
+ method_(method),
+ declaring_class_(nullptr),
+ class_info_(nullptr),
+ name_(nullptr),
+ signature_(nullptr),
+ generic_(nullptr),
+ first_line_(-1) {}
+
+ ~ScopedMethodInfo() {
+ env_->DeleteLocalRef(declaring_class_);
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ if (jvmti_env_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ class_info_.reset(new ScopedClassInfo(jvmti_env_, declaring_class_));
+ jint nlines;
+ jvmtiLineNumberEntry* lines;
+ jvmtiError err = jvmti_env_->GetLineNumberTable(method_, &nlines, &lines);
+ if (err == JVMTI_ERROR_NONE) {
+ if (nlines > 0) {
+ first_line_ = lines[0].line_number;
+ }
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+ } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+ err != JVMTI_ERROR_NATIVE_METHOD) {
+ return false;
+ }
+ return class_info_->Init() &&
+ (jvmti_env_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetSignature() const {
+ return signature_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ jint GetFirstLine() const {
+ return first_line_;
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ JNIEnv* env_;
+ jmethodID method_;
+ jclass declaring_class_;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_;
+ char* signature_;
+ char* generic_;
+ jint first_line_;
+
+ friend std::ostream& operator<<(std::ostream& os, ScopedMethodInfo const& method);
+};
+
+std::ostream& operator<<(std::ostream& os, const ScopedMethodInfo* method) {
+ return os << *method;
+}
+
+std::ostream& operator<<(std::ostream& os, ScopedMethodInfo const& method) {
+ return os << method.GetDeclaringClassInfo().GetName() << "->" << method.GetName()
+ << method.GetSignature() << " (source: "
+ << method.GetDeclaringClassInfo().GetSourceFileName() << ":" << method.GetFirstLine()
+ << ")";
+}
+
+static void BreakpointCB(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID method,
+ jlocation location) {
+ ScopedThreadInfo info(jvmti_env, env, thread);
+ ScopedMethodInfo method_info(jvmti_env, env, method);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
+ return;
+ }
+ LOG(WARNING) << "Breakpoint at location: 0x" << std::setw(8) << std::setfill('0') << std::hex
+ << location << " in method " << method_info << " thread: " << info.GetName();
+}
+
+static std::string SubstrOf(const std::string& s, size_t start, size_t end) {
+ if (end == std::string::npos) {
+ end = s.size();
+ }
+ if (end == start) {
+ return "";
+ }
+ CHECK_GT(end, start) << "cannot get substr of " << s;
+ return s.substr(start, end - start);
+}
+
+static bool ParseSingleBreakpoint(const std::string& bp, /*out*/SingleBreakpointTarget* target) {
+ std::string option = bp;
+ if (option.empty() || option[0] != 'L' || option.find(';') == std::string::npos) {
+ LOG(ERROR) << option << " doesn't look like it has a class name";
+ return false;
+ }
+ target->class_name = SubstrOf(option, 1, option.find(';'));
+
+ option = SubstrOf(option, option.find(';') + 1, std::string::npos);
+ if (option.size() < 2 || option[0] != '-' || option[1] != '>') {
+ LOG(ERROR) << bp << " doesn't seem to indicate a method, expected ->";
+ return false;
+ }
+ option = SubstrOf(option, 2, std::string::npos);
+ size_t sig_start = option.find('(');
+ size_t loc_start = option.find('@');
+ if (option.empty() || sig_start == std::string::npos) {
+ LOG(ERROR) << bp << " doesn't seem to have a method sig!";
+ return false;
+ } else if (loc_start == std::string::npos ||
+ loc_start < sig_start ||
+ loc_start + 1 >= option.size()) {
+ LOG(ERROR) << bp << " doesn't seem to have a valid location!";
+ return false;
+ }
+ target->method_name = SubstrOf(option, 0, sig_start);
+ target->method_sig = SubstrOf(option, sig_start, loc_start);
+ target->location = std::stol(SubstrOf(option, loc_start + 1, std::string::npos));
+ return true;
+}
+
+static std::string RemoveLastOption(const std::string& op) {
+ if (op.find(',') == std::string::npos) {
+ return "";
+ } else {
+ return SubstrOf(op, op.find(',') + 1, std::string::npos);
+ }
+}
+
+// Fills targets with the breakpoints to add.
+// Lname/of/Klass;->methodName(Lsig/of/Method)Lreturn/Type;@location,<...>
+static bool ParseArgs(const std::string& start_options,
+ /*out*/BreakpointTargets* targets) {
+ for (std::string options = start_options;
+ !options.empty();
+ options = RemoveLastOption(options)) {
+ SingleBreakpointTarget target;
+ std::string next = SubstrOf(options, 0, options.find(','));
+ if (!ParseSingleBreakpoint(next, /*out*/ &target)) {
+ LOG(ERROR) << "Unable to parse breakpoint from " << next;
+ return false;
+ }
+ targets->bps.push_back(target);
+ }
+ return true;
+}
+
+enum class StartType {
+ OnAttach, OnLoad,
+};
+
+static jint AgentStart(StartType start,
+ JavaVM* vm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ jvmtiEnv* jvmti = nullptr;
+ jvmtiError error = JVMTI_ERROR_NONE;
+ {
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_1);
+
+ if (res != JNI_OK || jvmti == nullptr) {
+ LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+ return JNI_ERR;
+ }
+ }
+
+ void* bp_target_mem = nullptr;
+ error = jvmti->Allocate(sizeof(BreakpointTargets),
+ reinterpret_cast<unsigned char**>(&bp_target_mem));
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to alloc memory for breakpoint target data";
+ return JNI_ERR;
+ }
+
+ BreakpointTargets* data = new(bp_target_mem) BreakpointTargets;
+ error = jvmti->SetEnvironmentLocalStorage(data);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set local storage";
+ return JNI_ERR;
+ }
+
+ if (!ParseArgs(options, /*out*/data)) {
+ LOG(ERROR) << "failed to parse breakpoint list!";
+ return JNI_ERR;
+ }
+
+ jvmtiCapabilities caps {}; // NOLINT [readability/braces]
+ caps.can_generate_breakpoint_events = JNI_TRUE;
+ caps.can_get_line_numbers = JNI_TRUE;
+ caps.can_get_source_file_name = JNI_TRUE;
+ caps.can_get_source_debug_extension = JNI_TRUE;
+ error = jvmti->AddCapabilities(&caps);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set caps";
+ return JNI_ERR;
+ }
+
+ jvmtiEventCallbacks callbacks {}; // NOLINT [readability/braces]
+ callbacks.Breakpoint = &BreakpointCB;
+ callbacks.VMInit = &VMInitCB;
+
+ error = jvmti->SetEventCallbacks(&callbacks, static_cast<jint>(sizeof(callbacks)));
+
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set event callbacks.";
+ return JNI_ERR;
+ }
+
+ error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ nullptr /* all threads */);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable breakpoint event";
+ return JNI_ERR;
+ }
+ if (start == StartType::OnAttach) {
+ JNIEnv* env = nullptr;
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_2);
+ if (res != JNI_OK || env == nullptr) {
+ LOG(ERROR) << "Unable to get jnienv";
+ return JNI_ERR;
+ }
+ VMInitCB(jvmti, env, nullptr);
+ } else {
+ error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_VM_INIT,
+ nullptr /* all threads */);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set event vminit";
+ return JNI_ERR;
+ }
+ }
+ return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* reserved) {
+ return AgentStart(StartType::OnAttach, vm, options, reserved);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+ return AgentStart(StartType::OnLoad, jvm, options, reserved);
+}
+
+} // namespace breakpoint_logger
+
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index d1ea15e988..53b509336e 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -35,7 +35,7 @@ fi
using_jack=$(get_build_var ANDROID_COMPILE_WITH_JACK)
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target libjdwp"
mode="target"
j_arg="-j$(nproc)"
showcommands=
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index c2e8c9728c..31587d8d62 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -40,7 +40,6 @@
#include "offset_thread.def"
// TODO: SHADOW_FRAME depends on __SIZEOF__POINTER__
// #include "offset_shadow_frame.def"
-#include "offset_codeitem.def"
// TODO: MIRROR_OBJECT_HEADER_SIZE (depends on #ifdef read barrier)
#include "offset_mirror_class.def"
#include "offset_mirror_dex_cache.def"
diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh
index 8d8e2bbe6f..4ca2722ac9 100755
--- a/tools/golem/build-target.sh
+++ b/tools/golem/build-target.sh
@@ -147,12 +147,8 @@ get_build_var() {
[[ -n $target_product ]] && extras+=" TARGET_PRODUCT=$target_product"
[[ -n $target_build_variant ]] && extras+=" TARGET_BUILD_VARIANT=$target_build_variant"
- # call dumpvar-$name from the makefile system.
- (\cd "$(gettop)";
- CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \
- command make --no-print-directory -f build/core/config.mk \
- $extras \
- dumpvar-$varname)
+ # call dumpvar from the build system.
+ (\cd "$(gettop)"; env $extras build/soong/soong_ui.bash --dumpvar-mode $varname)
}
# Defaults from command-line.
@@ -160,7 +156,7 @@ get_build_var() {
mode="" # blank or 'golem' if --golem was specified.
golem_target="" # --golem=$golem_target
config="" # --machine-type=$config
-j_arg="-j8"
+j_arg=""
showcommands=""
simulate=""
make_tarball=""
@@ -353,7 +349,7 @@ fi
# and maybe calls lunch).
#
-execute make "${j_arg}" "${make_target}"
+execute build/soong/soong_ui.bash --make-mode "${j_arg}" "${make_target}"
if $strip_symbols; then
# Further reduce size by stripping symbols.
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index 1812177e2f..646a96adbb 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -22,13 +22,6 @@
name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
},
{
- description: "Tests fail due to using the not yet supported interrupt thread functions",
- result: EXEC_FAILED,
- bug: 34415266,
- names: [ "org.apache.harmony.jpda.tests.jdwp.ThreadReference.CurrentContendedMonitorTest#testCurrentContendedMonitor001",
- "org.apache.harmony.jpda.tests.jdwp.ThreadReference.InterruptTest#testInterrupt001" ]
-},
-{
description: "Tests fail with assertion error on slot number",
result: EXEC_FAILED,
bug: 66905468,
diff --git a/tools/libjdwp_oj_art_failures.txt b/tools/libjdwp_oj_art_failures.txt
new file mode 100644
index 0000000000..e0f243ccfb
--- /dev/null
+++ b/tools/libjdwp_oj_art_failures.txt
@@ -0,0 +1,65 @@
+/*
+ * This file contains expectations for ART's buildbot. The purpose of this file is
+ * to temporarily list failing tests and not break the bots.
+ */
+[
+{
+ description: "Test fails due to unexpectedly getting the thread-groups of zombie threads",
+ result: EXEC_FAILED,
+ bug: 66906414,
+ name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
+},
+{
+ description: "Test fails due to modifiers not including ACC_SUPER",
+ result: EXEC_FAILED,
+ bug: 66906055,
+ name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.ModifiersTest#testModifiers001"
+},
+{
+ description: "Test fails due to static values not being set correctly.",
+ result: EXEC_FAILED,
+ bug: 66905894,
+ name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
+},
+{
+ description: "Tests fail with assertion error on slot number",
+ result: EXEC_FAILED,
+ bug: 66905468,
+ names: [ "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableTest#testVariableTableTest001",
+ "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableWithGenericTest#testVariableTableWithGenericTest001" ]
+},
+{
+ description: "Test fails with Error VM_DEAD when trying to resume during VM_DEATH event",
+ result: EXEC_FAILED,
+ bug: 66904725,
+ name: "org.apache.harmony.jpda.tests.jdwp.Events.VMDeath002Test#testVMDeathRequest"
+},
+/* TODO Categorize these failures more. */
+{
+ description: "Tests that fail on both ART and RI. These tests are likely incorrect",
+ result: EXEC_FAILED,
+ bug: 66906734,
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference.SetValues003Test#testSetValues003_InvalidIndex",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod003",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType.NewInstanceTest#testNewInstance002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType.SetValues002Test#testSetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.Events.ClassPrepare002Test#testClassPrepareCausedByDebugger",
+ "org.apache.harmony.jpda.tests.jdwp.Events.ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.DisableCollectionTest#testDisableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_invalid",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValues003Test#testSetValues003",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValuesTest#testSetValues001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType.FieldsWithGenericTest#testFieldsWithGeneric001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues004Test#testGetValues004",
+ "org.apache.harmony.jpda.tests.jdwp.StringReference.ValueTest#testStringReferenceValueTest001_NullString",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ChildrenTest#testChildren_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+}
+]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index f9dfb8ba96..db8c54056d 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -285,7 +285,7 @@ fi
if [[ $using_jack == "true" ]]; then
toolchain_args="--toolchain jack --language JN --jack-arg -g"
else
- toolchain_args="--toolchain jdk --language CUR"
+ toolchain_args="--toolchain dx --language CUR"
fi
# Run the tests using vogar.
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index eecdd2fb5e..ed3cf40f56 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -149,7 +149,7 @@ vogar_args="$vogar_args --timeout 480"
if [[ $using_jack == "true" ]]; then
vogar_args="$vogar_args --toolchain jack --language JO"
else
- vogar_args="$vogar_args --toolchain jdk --language CUR"
+ vogar_args="$vogar_args --toolchain dx --language CUR"
fi
# JIT settings.
diff --git a/tools/run-libjdwp-tests.sh b/tools/run-libjdwp-tests.sh
new file mode 100755
index 0000000000..964bb386ef
--- /dev/null
+++ b/tools/run-libjdwp-tests.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -d libcore ]]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+if [[ `uname` != 'Linux' ]]; then
+ echo "Script cannot be run on $(uname). It is Linux only."
+ exit 2
+fi
+
+args=("$@")
+debug="no"
+has_variant="no"
+has_mode="no"
+mode="target"
+
+while true; do
+ if [[ $1 == "--debug" ]]; then
+ debug="yes"
+ shift
+ elif [[ "$1" == "--mode=jvm" ]]; then
+ has_mode="yes"
+ mode="ri"
+ shift
+ elif [[ "$1" == --mode=host ]]; then
+ has_mode="yes"
+ mode="host"
+ shift
+ elif [[ $1 == --variant=* ]]; then
+ has_variant="yes"
+ shift
+ elif [[ "$1" == "" ]]; then
+ break
+ else
+ shift
+ fi
+done
+
+if [[ "$has_mode" = "no" ]]; then
+ args+=(--mode=device)
+fi
+
+if [[ "$has_variant" = "no" ]]; then
+ args+=(--variant=X32)
+fi
+
+# We don't use full paths since it is difficult to determine them for device
+# tests, and are not needed due to the resolution rules of dlopen.
+if [[ "$debug" = "yes" ]]; then
+ args+=(-Xplugin:libopenjdkjvmtid.so)
+else
+ args+=(-Xplugin:libopenjdkjvmti.so)
+fi
+
+expect_path=$PWD/art/tools/libjdwp_oj_art_failures.txt
+function verbose_run() {
+ echo "$@"
+ env "$@"
+}
+
+verbose_run ./art/tools/run-jdwp-tests.sh \
+ "${args[@]}" \
+ --jdwp-path "libjdwp.so" \
+ --expectations "$expect_path"