-rw-r--r--  Android.bp | 3
-rw-r--r--  build/Android.bp | 6
-rw-r--r--  build/Android.gtest.mk | 4
-rw-r--r--  build/art.go | 5
-rw-r--r--  compiler/Android.bp | 6
-rw-r--r--  compiler/driver/compiler_driver.cc | 19
-rw-r--r--  compiler/driver/compiler_driver.h | 5
-rw-r--r--  compiler/driver/compiler_options.cc | 1
-rw-r--r--  compiler/driver/compiler_options.h | 5
-rw-r--r--  compiler/driver/compiler_options_map-inl.h | 4
-rw-r--r--  compiler/driver/compiler_options_map.def | 1
-rw-r--r--  compiler/exception_test.cc | 3
-rw-r--r--  compiler/optimizing/code_generator.cc | 348
-rw-r--r--  compiler/optimizing/code_generator.h | 89
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 63
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 86
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 124
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 4
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 63
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc | 6
-rw-r--r--  compiler/optimizing/code_generator_vector_mips.cc | 174
-rw-r--r--  compiler/optimizing/code_generator_vector_mips64.cc | 168
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 84
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 84
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 4
-rw-r--r--  compiler/optimizing/data_type.h | 2
-rw-r--r--  compiler/optimizing/data_type_test.cc | 2
-rw-r--r--  compiler/optimizing/emit_swap_mips_test.cc | 32
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 59
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 28
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 24
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 27
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 22
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 23
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 23
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 23
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 61
-rw-r--r--  compiler/optimizing/nodes.h | 21
-rw-r--r--  compiler/optimizing/nodes_shared.cc | 3
-rw-r--r--  compiler/optimizing/nodes_vector.h | 20
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 1
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc | 12
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 26
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc | 1
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 34
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 35
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 30
-rw-r--r--  compiler/utils/assembler_test.h | 18
-rw-r--r--  compiler/utils/mips/assembler_mips.cc | 288
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 57
-rw-r--r--  compiler/utils/mips/assembler_mips32r6_test.cc | 126
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 88
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc | 156
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 30
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc | 145
-rw-r--r--  dalvikvm/Android.bp | 3
-rw-r--r--  dex2oat/dex2oat.cc | 23
-rw-r--r--  dex2oat/dex2oat_image_test.cc | 1
-rw-r--r--  dex2oat/dex2oat_options.cc | 5
-rw-r--r--  dex2oat/dex2oat_options.def | 1
-rw-r--r--  dex2oat/dex2oat_options.h | 1
-rw-r--r--  dex2oat/dex2oat_test.cc | 112
-rw-r--r--  dex2oat/linker/image_test.h | 3
-rw-r--r--  dex2oat/linker/oat_writer.cc | 23
-rw-r--r--  dex2oat/linker/oat_writer.h | 9
-rw-r--r--  dex2oat/linker/oat_writer_test.cc | 11
-rw-r--r--  dexdump/Android.bp | 21
-rw-r--r--  dexdump/dexdump.cc | 3
-rw-r--r--  dexlayout/Android.bp | 5
-rw-r--r--  dexlayout/dex_ir.cc | 51
-rw-r--r--  dexlayout/dex_ir.h | 45
-rw-r--r--  dexlayout/dex_ir_builder.cc | 1
-rw-r--r--  dexlayout/dex_verify.cc | 109
-rw-r--r--  dexlayout/dex_verify.h | 8
-rw-r--r--  dexlayout/dex_writer.cc | 19
-rw-r--r--  dexlayout/dex_writer.h | 11
-rw-r--r--  dexlayout/dexlayout.cc | 253
-rw-r--r--  dexlayout/dexlayout.h | 15
-rw-r--r--  dexlayout/dexlayout_test.cc | 3
-rw-r--r--  dexlist/dexlist.cc | 3
-rw-r--r--  disassembler/disassembler_mips.cc | 33
-rw-r--r--  oatdump/oatdump.cc | 135
-rw-r--r--  oatdump/oatdump_test.cc | 12
-rw-r--r--  oatdump/oatdump_test.h | 20
-rw-r--r--  openjdkjvm/Android.bp | 4
-rw-r--r--  openjdkjvm/OpenjdkJvm.cc | 4
-rw-r--r--  openjdkjvmti/Android.bp | 7
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc | 6
-rw-r--r--  openjdkjvmti/art_jvmti.h | 13
-rw-r--r--  openjdkjvmti/deopt_manager.cc | 322
-rw-r--r--  openjdkjvmti/deopt_manager.h | 168
-rw-r--r--  openjdkjvmti/events-inl.h | 11
-rw-r--r--  openjdkjvmti/events.cc | 57
-rw-r--r--  openjdkjvmti/events.h | 1
-rw-r--r--  openjdkjvmti/jvmti_weak_table-inl.h | 2
-rw-r--r--  openjdkjvmti/ti_breakpoint.cc | 62
-rw-r--r--  openjdkjvmti/ti_class.cc | 2
-rw-r--r--  openjdkjvmti/ti_class_loader.cc | 2
-rw-r--r--  openjdkjvmti/ti_field.cc | 4
-rw-r--r--  openjdkjvmti/ti_field.h | 14
-rw-r--r--  openjdkjvmti/ti_method.cc | 31
-rw-r--r--  openjdkjvmti/ti_monitor.cc | 4
-rw-r--r--  openjdkjvmti/ti_phase.cc | 2
-rw-r--r--  openjdkjvmti/ti_properties.cc | 4
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 2
-rw-r--r--  openjdkjvmti/ti_search.cc | 5
-rw-r--r--  openjdkjvmti/ti_stack.cc | 69
-rw-r--r--  openjdkjvmti/ti_thread.cc | 148
-rw-r--r--  profman/profman.cc | 2
-rw-r--r--  runtime/Android.bp | 8
-rw-r--r--  runtime/arch/mips/asm_support_mips.S | 26
-rw-r--r--  runtime/arch/mips/asm_support_mips.h | 2
-rw-r--r--  runtime/arch/mips/context_mips.cc | 15
-rw-r--r--  runtime/arch/mips/jni_entrypoints_mips.S | 10
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 214
-rw-r--r--  runtime/arch/mips/quick_method_frame_info_mips.h | 18
-rw-r--r--  runtime/base/bit_struct.h | 18
-rw-r--r--  runtime/base/bit_struct_detail.h | 57
-rw-r--r--  runtime/base/bit_struct_test.cc | 85
-rw-r--r--  runtime/base/bit_utils.h | 3
-rw-r--r--  runtime/base/bit_utils_test.cc | 2
-rw-r--r--  runtime/base/bit_vector-inl.h | 18
-rw-r--r--  runtime/base/bit_vector.h | 20
-rw-r--r--  runtime/base/debug_stack.h | 10
-rw-r--r--  runtime/base/mutex.h | 2
-rw-r--r--  runtime/base/scoped_arena_allocator.cc | 33
-rw-r--r--  runtime/base/scoped_arena_allocator.h | 18
-rw-r--r--  runtime/cdex/compact_dex_file.cc | 49
-rw-r--r--  runtime/cdex/compact_dex_file.h | 66
-rw-r--r--  runtime/cdex/compact_dex_file_test.cc | 48
-rw-r--r--  runtime/cdex/compact_dex_level.h | 34
-rw-r--r--  runtime/class_linker.cc | 24
-rw-r--r--  runtime/class_linker.h | 9
-rw-r--r--  runtime/class_linker_test.cc | 28
-rw-r--r--  runtime/class_loader_context.cc | 1
-rw-r--r--  runtime/common_dex_operations.h | 26
-rw-r--r--  runtime/common_runtime_test.cc | 12
-rw-r--r--  runtime/common_throws.cc | 2
-rw-r--r--  runtime/debugger.cc | 8
-rw-r--r--  runtime/debugger.h | 1
-rw-r--r--  runtime/dex2oat_environment_test.h | 2
-rw-r--r--  runtime/dex_file-inl.h | 275
-rw-r--r--  runtime/dex_file.cc | 266
-rw-r--r--  runtime/dex_file.h | 77
-rw-r--r--  runtime/dex_file_annotations.cc | 17
-rw-r--r--  runtime/dex_file_loader.cc | 123
-rw-r--r--  runtime/dex_file_loader.h | 26
-rw-r--r--  runtime/dex_file_test.cc | 12
-rw-r--r--  runtime/dex_file_verifier.cc | 18
-rw-r--r--  runtime/dex_file_verifier.h | 2
-rw-r--r--  runtime/dex_file_verifier_test.cc | 5
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 10
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 8
-rw-r--r--  runtime/gc/heap.cc | 18
-rw-r--r--  runtime/gc/heap.h | 4
-rw-r--r--  runtime/gc/reference_processor.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 9
-rw-r--r--  runtime/interpreter/interpreter.cc | 8
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 18
-rw-r--r--  runtime/interpreter/interpreter_common.h | 11
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 8
-rw-r--r--  runtime/interpreter/mterp/Makefile_mterp | 2
-rw-r--r--  runtime/interpreter/mterp/arm/const.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm/entry.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/invoke_polymorphic.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_class.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_string.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_custom.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/close_cfi.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/const.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/entry.S | 7
-rw-r--r--  runtime/interpreter/mterp/arm64/footer.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm64/header.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/invoke_polymorphic.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_class.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_string.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_custom.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/config_arm | 12
-rw-r--r--  runtime/interpreter/mterp/config_arm64 | 15
-rw-r--r--  runtime/interpreter/mterp/config_mips | 12
-rw-r--r--  runtime/interpreter/mterp/config_mips64 | 12
-rw-r--r--  runtime/interpreter/mterp/config_x86 | 12
-rw-r--r--  runtime/interpreter/mterp/config_x86_64 | 12
-rwxr-xr-x  runtime/interpreter/mterp/gen_mterp.py | 6
-rw-r--r--  runtime/interpreter/mterp/mips/const.S | 17
-rw-r--r--  runtime/interpreter/mterp/mips/invoke_polymorphic.S | 19
-rw-r--r--  runtime/interpreter/mterp/mips/op_const_class.S | 13
-rw-r--r--  runtime/interpreter/mterp/mips/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_const_string.S | 13
-rw-r--r--  runtime/interpreter/mterp/mips/op_invoke_custom.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/const.S | 17
-rw-r--r--  runtime/interpreter/mterp/mips64/header.S | 53
-rw-r--r--  runtime/interpreter/mterp/mips64/invoke_polymorphic.S | 20
-rw-r--r--  runtime/interpreter/mterp/mips64/op_const_class.S | 14
-rw-r--r--  runtime/interpreter/mterp/mips64/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_const_string.S | 14
-rw-r--r--  runtime/interpreter/mterp/mips64/op_invoke_custom.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/mips64/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 69
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S | 202
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm64.S | 214
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips.S | 175
-rw-r--r--  runtime/interpreter/mterp/out/mterp_mips64.S | 232
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86.S | 201
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86_64.S | 181
-rw-r--r--  runtime/interpreter/mterp/x86/const.S | 19
-rw-r--r--  runtime/interpreter/mterp/x86/invoke_polymorphic.S | 25
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_class.S | 15
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_string.S | 15
-rw-r--r--  runtime/interpreter/mterp/x86/op_invoke_custom.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/const.S | 15
-rw-r--r--  runtime/interpreter/mterp/x86_64/invoke_polymorphic.S | 22
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_const_class.S | 11
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_const_method_handle.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_const_method_type.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_const_string.S | 11
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_invoke_custom.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 8
-rw-r--r--  runtime/interpreter/unstarted_runtime_list.h | 1
-rw-r--r--  runtime/java_vm_ext.cc | 4
-rw-r--r--  runtime/jit/jit.cc | 7
-rw-r--r--  runtime/jni_internal.cc | 2
-rw-r--r--  runtime/jni_internal_test.cc | 2
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 14
-rw-r--r--  runtime/monitor.cc | 19
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 4
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 4
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 31
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 7
-rw-r--r--  runtime/native/java_lang_Class.cc | 4
-rw-r--r--  runtime/native/java_lang_String.cc | 10
-rw-r--r--  runtime/native/java_lang_StringFactory.cc | 4
-rw-r--r--  runtime/native/java_lang_Thread.cc | 2
-rw-r--r--  runtime/native/java_lang_VMClassLoader.cc | 4
-rw-r--r--  runtime/native/libcore_util_CharsetUtils.cc | 2
-rw-r--r--  runtime/native/native_util.h | 2
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc | 2
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 4
-rw-r--r--  runtime/non_debuggable_classes.cc | 2
-rw-r--r--  runtime/oat_file.cc | 28
-rw-r--r--  runtime/oat_file_manager.cc | 8
-rw-r--r--  runtime/parsed_options.cc | 7
-rw-r--r--  runtime/plugin.cc | 6
-rw-r--r--  runtime/reflection.cc | 2
-rw-r--r--  runtime/reflection_test.cc | 2
-rw-r--r--  runtime/runtime.cc | 42
-rw-r--r--  runtime/runtime.h | 10
-rw-r--r--  runtime/runtime_callbacks.cc | 11
-rw-r--r--  runtime/runtime_callbacks.h | 10
-rw-r--r--  runtime/runtime_callbacks_test.cc | 2
-rw-r--r--  runtime/runtime_options.def | 1
-rw-r--r--  runtime/signal_catcher.cc | 2
-rw-r--r--  runtime/standard_dex_file.h | 9
-rw-r--r--  runtime/thread.cc | 23
-rw-r--r--  runtime/thread.h | 6
-rw-r--r--  runtime/thread_list.cc | 11
-rw-r--r--  runtime/ti/agent.cc | 3
-rw-r--r--  runtime/trace.cc | 2
-rw-r--r--  runtime/vdex_file.cc | 66
-rw-r--r--  runtime/vdex_file.h | 10
-rw-r--r--  runtime/well_known_classes.cc | 2
-rw-r--r--  test/004-ThreadStress/src/Main.java | 64
-rw-r--r--  test/100-reflect2/expected.txt | 2
-rw-r--r--  test/1929-exception-catch-exception/build | 20
-rw-r--r--  test/1934-jvmti-signal-thread/src/art/Test1934.java | 32
-rw-r--r--  test/458-checker-instruct-simplification/src/Main.java | 434
-rw-r--r--  test/482-checker-loop-back-edge-use/build | 20
-rw-r--r--  test/482-checker-loop-back-edge-use/src/Main.java | 4
-rw-r--r--  test/484-checker-register-hints/build | 20
-rw-r--r--  test/484-checker-register-hints/smali/Smali.smali | 143
-rw-r--r--  test/484-checker-register-hints/src/Main.java | 12
-rw-r--r--  test/538-checker-embed-constants/src/Main.java | 11
-rwxr-xr-x  test/586-checker-null-array-get/build | 3
-rw-r--r--  test/586-checker-null-array-get/smali/SmaliTests.smali | 71
-rw-r--r--  test/586-checker-null-array-get/src/Main.java | 13
-rwxr-xr-x  test/593-checker-boolean-2-integral-conv/build | 3
-rw-r--r--  test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali | 137
-rw-r--r--  test/593-checker-boolean-2-integral-conv/src/Main.java | 70
-rw-r--r--  test/611-checker-simplify-if/build | 20
-rw-r--r--  test/623-checker-loop-regressions/src/Main.java | 51
-rw-r--r--  test/640-checker-boolean-simd/src/Main.java | 48
-rw-r--r--  test/640-checker-byte-simd/src/Main.java | 90
-rw-r--r--  test/640-checker-char-simd/src/Main.java | 90
-rw-r--r--  test/640-checker-double-simd/src/Main.java | 57
-rw-r--r--  test/640-checker-float-simd/src/Main.java | 63
-rw-r--r--  test/640-checker-int-simd/src/Main.java | 184
-rw-r--r--  test/640-checker-long-simd/src/Main.java | 136
-rw-r--r--  test/640-checker-short-simd/src/Main.java | 90
-rw-r--r--  test/645-checker-abs-simd/src/Main.java | 100
-rw-r--r--  test/646-checker-hadd-alt-byte/src/Main.java | 175
-rw-r--r--  test/646-checker-hadd-alt-char/src/Main.java | 174
-rw-r--r--  test/646-checker-hadd-alt-short/src/Main.java | 175
-rw-r--r--  test/646-checker-hadd-byte/src/Main.java | 175
-rw-r--r--  test/646-checker-hadd-char/src/Main.java | 173
-rw-r--r--  test/646-checker-hadd-short/src/Main.java | 272
-rw-r--r--  test/651-checker-byte-simd-minmax/src/Main.java | 113
-rw-r--r--  test/651-checker-char-simd-minmax/src/Main.java | 53
-rw-r--r--  test/651-checker-double-simd-minmax/src/Main.java | 18
-rw-r--r--  test/651-checker-float-simd-minmax/src/Main.java | 18
-rw-r--r--  test/651-checker-int-simd-minmax/src/Main.java | 46
-rw-r--r--  test/651-checker-long-simd-minmax/src/Main.java | 18
-rw-r--r--  test/651-checker-short-simd-minmax/src/Main.java | 113
-rw-r--r--  test/660-checker-simd-sad-int/src/Main.java | 64
-rw-r--r--  test/660-checker-simd-sad-short2/src/Main.java | 112
-rw-r--r--  test/661-checker-simd-reduc/src/Main.java | 409
-rw-r--r--  test/665-checker-simd-zero/src/Main.java | 48
-rw-r--r--  test/667-out-of-bounds/expected.txt | 1
-rw-r--r--  test/667-out-of-bounds/info.txt | 3
-rw-r--r--  test/667-out-of-bounds/src/Main.java | 30
-rw-r--r--  test/910-methods/check | 10
-rw-r--r--  test/924-threads/expected.txt | 2
-rw-r--r--  test/924-threads/src/art/Test924.java | 33
-rw-r--r--  test/924-threads/threads.cc | 40
-rw-r--r--  test/992-source-data/expected.txt | 12
-rw-r--r--  test/992-source-data/source_file.cc | 13
-rw-r--r--  test/992-source-data/src/art/Test992.java | 21
-rw-r--r--  test/993-breakpoints/breakpoints.cc | 51
-rw-r--r--  test/993-breakpoints/expected.txt | 101
-rw-r--r--  test/993-breakpoints/src/art/Test993.java | 178
-rw-r--r--  test/Android.bp | 33
-rwxr-xr-x  test/etc/run-test-jar | 10
-rwxr-xr-x  test/run-test | 6
-rw-r--r--  test/testrunner/device_config.py | 20
-rw-r--r--  test/testrunner/env.py | 8
-rwxr-xr-x  test/testrunner/run_build_test_target.py | 1
-rwxr-xr-x  test/testrunner/testrunner.py | 38
-rw-r--r--  tools/ahat/Android.mk | 22
-rw-r--r--  tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java | 73
-rw-r--r--  tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java | 2
-rw-r--r--  tools/ahat/src/test-dump/Main.java | 11
-rw-r--r--  tools/ahat/src/test/com/android/ahat/InstanceTest.java | 49
-rw-r--r--  tools/breakpoint-logger/Android.bp | 66
-rw-r--r--  tools/breakpoint-logger/README.md | 54
-rw-r--r--  tools/breakpoint-logger/breakpoint_logger.cc | 447
-rwxr-xr-x  tools/golem/build-target.sh | 12
-rw-r--r--  tools/libjdwp_art_failures.txt | 35
-rwxr-xr-x  tools/run-jdwp-tests.sh | 4
-rwxr-xr-x  tools/run-libcore-tests.sh | 2
-rw-r--r--  tools/wrapagentproperties/wrapagentproperties.cc | 4
377 files changed, 10113 insertions, 4163 deletions
diff --git a/Android.bp b/Android.bp
index 1b66e6fbbd..295ae4c556 100644
--- a/Android.bp
+++ b/Android.bp
@@ -11,6 +11,8 @@ art_static_dependencies = [
"libbacktrace",
"libcutils",
"libunwindbacktrace",
+ "libunwind",
+ "libunwindstack",
"libutils",
"libbase",
"liblz4",
@@ -39,6 +41,7 @@ subdirs = [
"sigchainlib",
"simulator",
"test",
+ "tools/breakpoint-logger",
"tools/cpp-define-generator",
"tools/dmtracedump",
"tools/titrace",
diff --git a/build/Android.bp b/build/Android.bp
index ff762dd703..2c959d46f5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -67,10 +67,6 @@ art_global_defaults {
cflags: [
"-DART_TARGET",
- // Enable missing-noreturn only on non-Mac. As lots of things are not implemented
- // for Apple, it's a pain.
- "-Wmissing-noreturn",
-
// To use oprofile_android --callgraph, uncomment this and recompile with
// mmma -j art
// "-fno-omit-frame-pointer",
@@ -83,7 +79,7 @@ art_global_defaults {
"bionic/libc/private",
],
},
- linux_glibc: {
+ linux: {
cflags: [
// Enable missing-noreturn only on non-Mac. As lots of things are not implemented for
// Apple, it's a pain.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 0f92a25366..1685a5f986 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -100,7 +100,7 @@ $(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMul
$(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^)
# Dex file dependencies for each gtest.
-ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
+ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested VerifierDeps VerifierDepsMulti
ART_GTEST_atomic_dex_ref_map_test_DEX_DEPS := Interfaces
ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
@@ -110,7 +110,7 @@ ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods Prof
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
diff --git a/build/art.go b/build/art.go
index 1bcaf51a1d..452b3485a3 100644
--- a/build/art.go
+++ b/build/art.go
@@ -97,6 +97,11 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
asflags = append(asflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
}
+ if envTrue(ctx, "ART_MIPS32_CHECK_ALIGNMENT") {
+ // Enable the use of MIPS32 CHECK_ALIGNMENT macro for debugging purposes
+ asflags = append(asflags, "-DART_MIPS32_CHECK_ALIGNMENT")
+ }
+
return cflags, asflags
}
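(Hedged illustration, not part of the change itself: the build/art.go hunk above reads the new flag from the environment via envTrue(), so assuming envTrue() matches the literal string "true", a local MIPS32 debugging build would be set up roughly as

    export ART_MIPS32_CHECK_ALIGNMENT=true
    mmma -j art    # rebuild so -DART_MIPS32_CHECK_ALIGNMENT reaches the assembler flags

using the same mmma invocation already suggested in the build comments earlier in this diff.)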
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 1e4cdf2bd5..fe0f3c75c5 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -195,6 +195,7 @@ art_cc_defaults {
include_dirs: ["art/disassembler"],
header_libs: [
"art_cmdlineparser_headers", // For compiler_options.
+ "libnativehelper_header_only",
],
export_include_dirs: ["."],
@@ -406,7 +407,10 @@ art_cc_test {
},
},
- header_libs: ["libart_simulator_headers"],
+ header_libs: [
+ "libart_simulator_headers",
+ "libnativehelper_header_only",
+ ],
shared_libs: [
"libartd-compiler",
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7573367788..547ffbcf62 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -296,6 +296,7 @@ CompilerDriver::CompilerDriver(
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
+ number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
stats_(new AOTCompilationStats),
@@ -923,6 +924,12 @@ void CompilerDriver::PreCompile(jobject class_loader,
LOG(FATAL_WITHOUT_ABORT) << "Had a hard failure verifying all classes, and was asked to abort "
<< "in such situations. Please check the log.";
abort();
+ } else if (number_of_soft_verifier_failures_ > 0 &&
+ GetCompilerOptions().AbortOnSoftVerifierFailure()) {
+ LOG(FATAL_WITHOUT_ABORT) << "Had " << number_of_soft_verifier_failures_ << " soft failure(s) "
+ << "verifying all classes, and was asked to abort in such situations. "
+ << "Please check the log.";
+ abort();
}
if (compiler_options_->IsAnyCompilationEnabled()) {
@@ -2069,13 +2076,13 @@ class VerifyClassVisitor : public CompilationVisitor {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
manager_->GetCompiler()->SetHadHardVerifierFailure();
+ } else if (failure_kind == verifier::FailureKind::kSoftFailure) {
+ manager_->GetCompiler()->AddSoftVerifierFailure();
} else {
// Force a soft failure for the VerifierDeps. This is a sanity measure, as
// the vdex file already records that the class hasn't been resolved. It avoids
// trying to do future verification optimizations when processing the vdex file.
- DCHECK(failure_kind == verifier::FailureKind::kSoftFailure ||
- failure_kind == verifier::FailureKind::kNoFailure)
- << failure_kind;
+ DCHECK(failure_kind == verifier::FailureKind::kNoFailure) << failure_kind;
failure_kind = verifier::FailureKind::kSoftFailure;
}
} else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
@@ -2087,6 +2094,8 @@ class VerifyClassVisitor : public CompilationVisitor {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
manager_->GetCompiler()->SetHadHardVerifierFailure();
+ } else if (failure_kind == verifier::FailureKind::kSoftFailure) {
+ manager_->GetCompiler()->AddSoftVerifierFailure();
}
CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerified() || klass->IsErroneous())
@@ -2152,7 +2161,9 @@ void CompilerDriver::VerifyDexFile(jobject class_loader,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
thread_pool);
- verifier::HardFailLogMode log_level = GetCompilerOptions().AbortOnHardVerifierFailure()
+ bool abort_on_verifier_failures = GetCompilerOptions().AbortOnHardVerifierFailure()
+ || GetCompilerOptions().AbortOnSoftVerifierFailure();
+ verifier::HardFailLogMode log_level = abort_on_verifier_failures
? verifier::HardFailLogMode::kLogInternalFatal
: verifier::HardFailLogMode::kLogWarning;
VerifyClassVisitor visitor(&context, log_level);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f16e2ed7d3..da4a580bf2 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
#define ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
+#include <atomic>
#include <set>
#include <string>
#include <unordered_set>
@@ -352,6 +353,9 @@ class CompilerDriver {
void SetHadHardVerifierFailure() {
had_hard_verifier_failure_ = true;
}
+ void AddSoftVerifierFailure() {
+ number_of_soft_verifier_failures_++;
+ }
Compiler::Kind GetCompilerKind() {
return compiler_kind_;
@@ -519,6 +523,7 @@ class CompilerDriver {
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
+ std::atomic<uint32_t> number_of_soft_verifier_failures_;
bool had_hard_verifier_failure_;
// A thread pool that can (potentially) run tasks in parallel.
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index b6cedff28a..f789314114 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -51,6 +51,7 @@ CompilerOptions::CompilerOptions()
compile_pic_(false),
verbose_methods_(),
abort_on_hard_verifier_failure_(false),
+ abort_on_soft_verifier_failure_(false),
init_failure_output_(nullptr),
dump_cfg_file_name_(""),
dump_cfg_append_(false),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 311dbd569e..12de9be60b 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -226,6 +226,9 @@ class CompilerOptions FINAL {
bool AbortOnHardVerifierFailure() const {
return abort_on_hard_verifier_failure_;
}
+ bool AbortOnSoftVerifierFailure() const {
+ return abort_on_soft_verifier_failure_;
+ }
const std::vector<const DexFile*>* GetNoInlineFromDexFile() const {
return no_inline_from_;
@@ -303,6 +306,8 @@ class CompilerOptions FINAL {
// Abort compilation with an error if we find a class that fails verification with a hard
// failure.
bool abort_on_hard_verifier_failure_;
+ // Same for soft failures.
+ bool abort_on_soft_verifier_failure_;
// Log initialization failures to this stream if not null.
std::unique_ptr<std::ostream> init_failure_output_;
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 9cb818a270..772d1b44de 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -60,6 +60,7 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
}
map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
+ map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
if (map.Exists(Base::DumpInitFailures)) {
if (!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) {
return false;
@@ -132,6 +133,9 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"})
.WithValues({true, false})
.IntoKey(Map::AbortOnHardVerifierFailure)
+ .Define({"--abort-on-soft-verifier-error", "--no-abort-on-soft-verifier-error"})
+ .WithValues({true, false})
+ .IntoKey(Map::AbortOnSoftVerifierFailure)
.Define("--dump-init-failures=_")
.template WithType<std::string>()
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 570bc5aca7..cc7563420a 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -50,6 +50,7 @@ COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
COMPILER_OPTIONS_KEY (Unit, Debuggable)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
+COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
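(Hedged illustration, not part of the change itself: the three compiler_options hunks above wire --abort-on-soft-verifier-error / --no-abort-on-soft-verifier-error through the option map in the same way as the existing hard-verifier switch, so a dex2oat invocation would pass it the same way. The paths below are placeholders and the other arguments dex2oat requires are omitted.

    dex2oat --dex-file=/tmp/Example.dex \
            --oat-file=/tmp/Example.oat \
            --abort-on-soft-verifier-error

The --no- form restores the default of not aborting on soft verifier failures.)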
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index f759aa5ef8..b434e90f0d 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -61,7 +61,8 @@ class ExceptionTest : public CommonRuntimeTest {
}
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stack_maps(&allocator, kRuntimeISA);
stack_maps.BeginStackMapEntry(/* dex_pc */ 3u,
/* native_pc_offset */ 3u,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 84f01828b2..9d0b5c865d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -42,6 +42,7 @@
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
+#include "base/casts.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "compiled_method.h"
@@ -59,6 +60,7 @@
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
+#include "stack_map_stream.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"
@@ -141,6 +143,158 @@ static bool CheckTypeConsistency(HInstruction* instruction) {
return true;
}
+class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
+ InstructionSet instruction_set) {
+ ScopedArenaAllocator allocator(arena_stack);
+ void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
+ return std::unique_ptr<CodeGenerationData>(
+ ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
+ }
+
+ ScopedArenaAllocator* GetScopedAllocator() {
+ return &allocator_;
+ }
+
+ void AddSlowPath(SlowPathCode* slow_path) {
+ slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
+ }
+
+ ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
+ return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
+ }
+
+ StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
+
+ void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
+ jit_string_roots_.Overwrite(string_reference,
+ reinterpret_cast64<uint64_t>(string.GetReference()));
+ }
+
+ uint64_t GetJitStringRootIndex(StringReference string_reference) const {
+ return jit_string_roots_.Get(string_reference);
+ }
+
+ size_t GetNumberOfJitStringRoots() const {
+ return jit_string_roots_.size();
+ }
+
+ void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+ jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
+ }
+
+ uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
+ return jit_class_roots_.Get(type_reference);
+ }
+
+ size_t GetNumberOfJitClassRoots() const {
+ return jit_class_roots_.size();
+ }
+
+ size_t GetNumberOfJitRoots() const {
+ return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
+ }
+
+ void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
+ : allocator_(std::move(allocator)),
+ stack_map_stream_(&allocator_, instruction_set),
+ slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
+ jit_string_roots_(StringReferenceValueComparator(),
+ allocator_.Adapter(kArenaAllocCodeGenerator)),
+ jit_class_roots_(TypeReferenceValueComparator(),
+ allocator_.Adapter(kArenaAllocCodeGenerator)) {
+ slow_paths_.reserve(kDefaultSlowPathsCapacity);
+ }
+
+ static constexpr size_t kDefaultSlowPathsCapacity = 8;
+
+ ScopedArenaAllocator allocator_;
+ StackMapStream stack_map_stream_;
+ ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
+
+ // Maps a StringReference (dex_file, string_index) to the index in the literal table.
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
+
+ // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
+};
+
+void CodeGenerator::CodeGenerationData::EmitJitRoots(
+ Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ size_t index = 0;
+ for (auto& entry : jit_string_roots_) {
+ // Update the `roots` with the string, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
+ // Ensure the string is strongly interned. This is a requirement on how the JIT
+ // handles strings. b/32995596
+ class_linker->GetInternTable()->InternStrong(
+ reinterpret_cast<mirror::String*>(roots->Get(index)));
+ ++index;
+ }
+ for (auto& entry : jit_class_roots_) {
+ // Update the `roots` with the class, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
+ ++index;
+ }
+}
+
+ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetScopedAllocator();
+}
+
+StackMapStream* CodeGenerator::GetStackMapStream() {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetStackMapStream();
+}
+
+void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
+ Handle<mirror::String> string) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->ReserveJitStringRoot(string_reference, string);
+}
+
+uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetJitStringRootIndex(string_reference);
+}
+
+void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->ReserveJitClassRoot(type_reference, klass);
+}
+
+uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetJitClassRootIndex(type_reference);
+}
+
+void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
+ const uint8_t* roots_data ATTRIBUTE_UNUSED) {
+ DCHECK(code_generation_data_ != nullptr);
+ DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
+ DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
+}
+
size_t CodeGenerator::GetCacheOffset(uint32_t index) {
return sizeof(GcRoot<mirror::Object>) * index;
}
@@ -210,9 +364,10 @@ class DisassemblyScope {
void CodeGenerator::GenerateSlowPaths() {
+ DCHECK(code_generation_data_ != nullptr);
size_t code_start = 0;
- for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
- SlowPathCode* slow_path = slow_path_unique_ptr.get();
+ for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
+ SlowPathCode* slow_path = slow_path_ptr.get();
current_slow_path_ = slow_path;
if (disasm_info_ != nullptr) {
code_start = GetAssembler()->CodeSize();
@@ -227,7 +382,14 @@ void CodeGenerator::GenerateSlowPaths() {
current_slow_path_ = nullptr;
}
+void CodeGenerator::InitializeCodeGenerationData() {
+ DCHECK(code_generation_data_ == nullptr);
+ code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
+}
+
void CodeGenerator::Compile(CodeAllocator* allocator) {
+ InitializeCodeGenerationData();
+
// The register allocator already called `InitializeCodeGeneration`,
// where the frame size has been computed.
DCHECK(block_order_ != nullptr);
@@ -667,12 +829,54 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
}
}
+CodeGenerator::CodeGenerator(HGraph* graph,
+ size_t number_of_core_registers,
+ size_t number_of_fpu_registers,
+ size_t number_of_register_pairs,
+ uint32_t core_callee_save_mask,
+ uint32_t fpu_callee_save_mask,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : frame_size_(0),
+ core_spill_mask_(0),
+ fpu_spill_mask_(0),
+ first_register_slot_in_slow_path_(0),
+ allocated_registers_(RegisterSet::Empty()),
+ blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
+ number_of_core_registers_(number_of_core_registers),
+ number_of_fpu_registers_(number_of_fpu_registers),
+ number_of_register_pairs_(number_of_register_pairs),
+ core_callee_save_mask_(core_callee_save_mask),
+ fpu_callee_save_mask_(fpu_callee_save_mask),
+ block_order_(nullptr),
+ disasm_info_(nullptr),
+ stats_(stats),
+ graph_(graph),
+ compiler_options_(compiler_options),
+ current_slow_path_(nullptr),
+ current_block_index_(0),
+ is_leaf_(true),
+ requires_current_method_(false),
+ code_generation_data_() {
+}
+
+CodeGenerator::~CodeGenerator() {}
+
void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
size_t* method_info_size) {
DCHECK(stack_map_size != nullptr);
DCHECK(method_info_size != nullptr);
- *stack_map_size = stack_map_stream_.PrepareForFillIn();
- *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ *stack_map_size = stack_map_stream->PrepareForFillIn();
+ *method_info_size = stack_map_stream->ComputeMethodInfoSize();
+}
+
+size_t CodeGenerator::GetNumberOfJitRoots() const {
+ DCHECK(code_generation_data_ != nullptr);
+ return code_generation_data_->GetNumberOfJitRoots();
}
static void CheckCovers(uint32_t dex_pc,
@@ -740,8 +944,9 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
MemoryRegion method_info_region,
const DexFile::CodeItem& code_item) {
- stack_map_stream_.FillInCodeInfo(stack_map_region);
- stack_map_stream_.FillInMethodInfo(method_info_region);
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ stack_map_stream->FillInCodeInfo(stack_map_region);
+ stack_map_stream->FillInMethodInfo(method_info_region);
if (kIsDebugBuild) {
CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
}
@@ -791,11 +996,12 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// Collect PC infos for the mapping table.
uint32_t native_pc = GetAssembler()->CodePosition();
+ StackMapStream* stack_map_stream = GetStackMapStream();
if (instruction == nullptr) {
// For stack overflow checks and native-debug-info entries without dex register
// mapping (i.e. start of basic block or start of slow path).
- stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
+ stack_map_stream->EndStackMapEntry();
return;
}
LocationSummary* locations = instruction->GetLocations();
@@ -814,7 +1020,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
- stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
+ stack_map_stream->BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
locations->GetStackMask(),
@@ -830,10 +1036,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
instruction->IsInvoke() &&
instruction->IsInvokeStaticOrDirect()) {
HInvoke* const invoke = instruction->AsInvoke();
- stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
+ stack_map_stream->AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
}
}
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
if (instruction->IsSuspendCheck() &&
@@ -844,10 +1050,10 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// We duplicate the stack map as a marker that this stack map can be an OSR entry.
// Duplicating it avoids having the runtime recognize and skip an OSR stack map.
DCHECK(info->IsIrreducible());
- stack_map_stream_.BeginStackMapEntry(
+ stack_map_stream->BeginStackMapEntry(
dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
EmitEnvironment(instruction->GetEnvironment(), slow_path);
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
if (kIsDebugBuild) {
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -867,21 +1073,22 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
} else if (kIsDebugBuild) {
// Ensure stack maps are unique, by checking that the native pc in the stack map
// last emitted is different than the native pc of the stack map just emitted.
- size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+ size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
- stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
+ DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
+ stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
}
}
}
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
- size_t count = stack_map_stream_.GetNumberOfStackMaps();
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ size_t count = stack_map_stream->GetNumberOfStackMaps();
if (count == 0) {
return false;
}
- CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
+ CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset;
return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}
@@ -899,6 +1106,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
void CodeGenerator::RecordCatchBlockInfo() {
ArenaAllocator* allocator = graph_->GetAllocator();
+ StackMapStream* stack_map_stream = GetStackMapStream();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -915,7 +1123,7 @@ void CodeGenerator::RecordCatchBlockInfo() {
ArenaBitVector* stack_mask =
ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
- stack_map_stream_.BeginStackMapEntry(dex_pc,
+ stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
register_mask,
stack_mask,
@@ -933,19 +1141,19 @@ void CodeGenerator::RecordCatchBlockInfo() {
}
if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
} else {
Location location = current_phi->GetLocations()->Out();
switch (location.GetKind()) {
case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
++vreg;
DCHECK_LT(vreg, num_vregs);
@@ -960,17 +1168,23 @@ void CodeGenerator::RecordCatchBlockInfo() {
}
}
- stack_map_stream_.EndStackMapEntry();
+ stack_map_stream->EndStackMapEntry();
}
}
+void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
+ DCHECK(code_generation_data_ != nullptr);
+ code_generation_data_->AddSlowPath(slow_path);
+}
+
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
if (environment == nullptr) return;
+ StackMapStream* stack_map_stream = GetStackMapStream();
if (environment->GetParent() != nullptr) {
// We emit the parent environment first.
EmitEnvironment(environment->GetParent(), slow_path);
- stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
+ stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
environment->GetDexPc(),
environment->Size(),
&graph_->GetDexFile());
@@ -980,7 +1194,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* current = environment->GetInstructionAt(i);
if (current == nullptr) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
continue;
}
@@ -990,43 +1204,43 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
} else if (current->IsNullConstant()) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant()) << current->DebugName();
int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
}
break;
}
case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
++i;
DCHECK_LT(i, environment_size);
@@ -1037,17 +1251,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -1059,17 +1273,17 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream_.AddDexRegisterEntry(
+ stack_map_stream->AddDexRegisterEntry(
DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
@@ -1083,16 +1297,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
}
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
++i;
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
++i;
}
DCHECK_LT(i, environment_size);
@@ -1104,15 +1318,15 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
}
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
}
++i;
DCHECK_LT(i, environment_size);
@@ -1120,7 +1334,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
case Location::kInvalid: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
break;
}
@@ -1130,7 +1344,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
if (environment->GetParent() != nullptr) {
- stack_map_stream_.EndInlineInfoEntry();
+ stack_map_stream->EndInlineInfoEntry();
}
}
@@ -1408,31 +1622,7 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
const uint8_t* roots_data) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t index = 0;
- for (auto& entry : jit_string_roots_) {
- // Update the `roots` with the string, and replace the address temporarily
- // stored to the index in the table.
- uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
- entry.second = index;
- // Ensure the string is strongly interned. This is a requirement on how the JIT
- // handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(
- reinterpret_cast<mirror::String*>(roots->Get(index)));
- ++index;
- }
- for (auto& entry : jit_class_roots_) {
- // Update the `roots` with the class, and replace the address temporarily
- // stored to the index in the table.
- uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
- entry.second = index;
- ++index;
- }
+ code_generation_data_->EmitJitRoots(roots);
EmitJitRootPatches(code, roots_data);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2904b71991..64c88eb67c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,7 +32,7 @@
#include "optimizing_compiler_stats.h"
#include "read_barrier_option.h"
#include "stack.h"
-#include "stack_map_stream.h"
+#include "stack_map.h"
#include "string_reference.h"
#include "type_reference.h"
#include "utils/label.h"
@@ -61,6 +61,7 @@ class Assembler;
class CodeGenerator;
class CompilerDriver;
class CompilerOptions;
+class StackMapStream;
class ParallelMoveResolver;
namespace linker {
@@ -190,7 +191,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
- virtual ~CodeGenerator() {}
+ virtual ~CodeGenerator();
// Get the graph. This is the outermost graph, never the graph of a method being inlined.
HGraph* GetGraph() const { return graph_; }
@@ -338,18 +339,16 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// TODO: Replace with a catch-entering instruction that records the environment.
void RecordCatchBlockInfo();
- // TODO: Avoid creating the `std::unique_ptr` here.
- void AddSlowPath(SlowPathCode* slow_path) {
- slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
- }
+ // Get the ScopedArenaAllocator used for codegen memory allocation.
+ ScopedArenaAllocator* GetScopedAllocator();
+
+ void AddSlowPath(SlowPathCode* slow_path);
void BuildStackMaps(MemoryRegion stack_map_region,
MemoryRegion method_info_region,
const DexFile::CodeItem& code_item);
void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
- size_t GetNumberOfJitRoots() const {
- return jit_string_roots_.size() + jit_class_roots_.size();
- }
+ size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
// Also emits literal patches.
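
These declarations replace the former inline bodies; their definitions in code_generator.cc presumably just forward to the new CodeGenerationData object. A sketch under that assumption:

    ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
      DCHECK(code_generation_data_ != nullptr);
      return code_generation_data_->GetScopedAllocator();
    }

    void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
      DCHECK(code_generation_data_ != nullptr);
      code_generation_data_->AddSlowPath(slow_path);
    }

    size_t CodeGenerator::GetNumberOfJitRoots() const {
      DCHECK(code_generation_data_ != nullptr);
      return code_generation_data_->GetNumberOfJitRoots();
    }
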
@@ -600,38 +599,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t core_callee_save_mask,
uint32_t fpu_callee_save_mask,
const CompilerOptions& compiler_options,
- OptimizingCompilerStats* stats)
- : frame_size_(0),
- core_spill_mask_(0),
- fpu_spill_mask_(0),
- first_register_slot_in_slow_path_(0),
- allocated_registers_(RegisterSet::Empty()),
- blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
- kArenaAllocCodeGenerator)),
- blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
- kArenaAllocCodeGenerator)),
- number_of_core_registers_(number_of_core_registers),
- number_of_fpu_registers_(number_of_fpu_registers),
- number_of_register_pairs_(number_of_register_pairs),
- core_callee_save_mask_(core_callee_save_mask),
- fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
- block_order_(nullptr),
- jit_string_roots_(StringReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_roots_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- disasm_info_(nullptr),
- stats_(stats),
- graph_(graph),
- compiler_options_(compiler_options),
- slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- current_slow_path_(nullptr),
- current_block_index_(0),
- is_leaf_(true),
- requires_current_method_(false) {
- slow_paths_.reserve(8);
- }
+ OptimizingCompilerStats* stats);
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -687,12 +655,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
return current_slow_path_;
}
+ StackMapStream* GetStackMapStream();
+
+ void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string);
+ uint64_t GetJitStringRootIndex(StringReference string_reference);
+ void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass);
+ uint64_t GetJitClassRootIndex(TypeReference type_reference);
+
  // Emit the patches associated with JIT roots. Only applies to JIT compiled code.
- virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
- const uint8_t* roots_data ATTRIBUTE_UNUSED) {
- DCHECK_EQ(jit_string_roots_.size(), 0u);
- DCHECK_EQ(jit_class_roots_.size(), 0u);
- }
+ virtual void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data);
// Frame size required for this method.
uint32_t frame_size_;
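
The JIT-root bookkeeping that each backend used to perform directly on jit_string_roots_/jit_class_roots_ is now funneled through these four helpers. Judging from the backend hunks below, the string variants presumably look roughly like this (a sketch; the class variants would mirror it with TypeReference and mirror::Class):

    void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                             Handle<mirror::String> string) {
      code_generation_data_->ReserveJitStringRoot(string_reference, string);
    }

    uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
      return code_generation_data_->GetJitStringRootIndex(string_reference);
    }

    // Inside CodeGenerationData (assumed member names), mirroring the code the
    // backends used to run themselves:
    void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
      jit_string_roots_.Overwrite(string_reference,
                                  reinterpret_cast64<uint64_t>(string.GetReference()));
    }

    uint64_t GetJitStringRootIndex(StringReference string_reference) {
      return jit_string_roots_.Get(string_reference);
    }
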
@@ -714,24 +685,15 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const uint32_t core_callee_save_mask_;
const uint32_t fpu_callee_save_mask_;
- StackMapStream stack_map_stream_;
-
// The order to use for code generation.
const ArenaVector<HBasicBlock*>* block_order_;
- // Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
- // will compute all the indices.
- ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
-
- // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
- // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
- // will compute all the indices.
- ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
-
DisassemblyInformation* disasm_info_;
private:
+ class CodeGenerationData;
+
+ void InitializeCodeGenerationData();
size_t GetStackOffsetOfSavedRegister(size_t index);
void GenerateSlowPaths();
void BlockIfInRegister(Location location, bool is_out = false) const;
@@ -742,8 +704,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
-
// The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
@@ -759,6 +719,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// needs the environment including a valid stack frame.
bool requires_current_method_;
+ // The CodeGenerationData contains a ScopedArenaAllocator intended for reusing the
+ // ArenaStack memory allocated in previous passes instead of adding to the memory
+ // held by the ArenaAllocator. This ScopedArenaAllocator is created in
+ // CodeGenerator::Compile() and remains alive until the CodeGenerator is destroyed.
+ std::unique_ptr<CodeGenerationData> code_generation_data_;
+
friend class OptimizingCFITest;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
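
Putting the pieces together, the private CodeGenerationData class plausibly bundles exactly the members deleted from CodeGenerator in this patch. The following is a sketch under that assumption; the constructor parameters and container types are guesses consistent with the removed initializer list:

    class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
     public:
      CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
          : allocator_(std::move(allocator)),
            stack_map_stream_(&allocator_, instruction_set),
            slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
            jit_string_roots_(StringReferenceValueComparator(),
                              allocator_.Adapter(kArenaAllocCodeGenerator)),
            jit_class_roots_(TypeReferenceValueComparator(),
                             allocator_.Adapter(kArenaAllocCodeGenerator)) {
        slow_paths_.reserve(8);
      }

      ScopedArenaAllocator* GetScopedAllocator() { return &allocator_; }
      StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
      void AddSlowPath(SlowPathCode* slow_path) { slow_paths_.push_back(slow_path); }
      size_t GetNumberOfJitRoots() const {
        return jit_string_roots_.size() + jit_class_roots_.size();
      }

     private:
      ScopedArenaAllocator allocator_;
      StackMapStream stack_map_stream_;
      // Slow paths are allocated in `allocator_`, so this sketch stores raw pointers.
      ScopedArenaVector<SlowPathCode*> slow_paths_;
      ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator>
          jit_string_roots_;
      ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator>
          jit_class_roots_;
    };
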
@@ -863,7 +829,8 @@ class SlowPathGenerator {
{{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
}
// Cannot share: create and add new slow-path for this particular dex-pc.
- SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
+ SlowPathCodeType* slow_path =
+ new (codegen_->GetScopedAllocator()) SlowPathCodeType(instruction);
iter->second.emplace_back(std::make_pair(instruction, slow_path));
codegen_->AddSlowPath(slow_path);
return slow_path;
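
Note the allocation change: slow paths are now placement-new'ed in the scoped arena rather than in the HGraph's ArenaAllocator. Per the comment added above, that arena lives from CodeGenerator::Compile() until the CodeGenerator is destroyed, so the pointers stay valid for the whole emission phase. The idiom used throughout the backend hunks below is:

    SlowPathCodeARM64* slow_path =
        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
    codegen_->AddSlowPath(slow_path);
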
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e6e69846e4..c7811ab976 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2204,7 +2204,8 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
SuspendCheckSlowPathARM64* slow_path =
down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -2305,11 +2306,12 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
Location base_loc = locations->InAt(0);
Location out = locations->Out();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- DataType::Type field_type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type load_type = instruction->GetType();
MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier &&
- field_type == DataType::Type::kReference) {
+ load_type == DataType::Type::kReference) {
// Object FieldGet with Baker's read barrier case.
// /* HeapReference<Object> */ out = *(base + offset)
Register base = RegisterFrom(base_loc, DataType::Type::kReference);
@@ -2336,10 +2338,10 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
} else {
// Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- codegen_->Load(field_type, OutputCPURegister(instruction), field);
+ codegen_->Load(load_type, OutputCPURegister(instruction), field);
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
- if (field_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference) {
// If read barriers are enabled, emit read barriers other than
// Baker's using a slow path (and also unpoison the loaded
// reference, if heap poisoning is enabled).
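
The new DCHECK only asserts that the sizes match because the HIR load's type is now allowed to differ from the declared field type in signedness, presumably so the instruction simplifier can retype loads; the generated load must therefore follow the instruction's type. Illustrative values (hypothetical, not taken from the patch):

    DataType::Type field_type = field_info.GetFieldType();  // e.g. DataType::Type::kInt16
    DataType::Type load_type  = instruction->GetType();     // e.g. DataType::Type::kUint16
    DCHECK_EQ(DataType::Size(field_type), DataType::Size(load_type));  // both 2 bytes
    codegen_->Load(load_type, OutputCPURegister(instruction), field);  // picks zero- vs sign-extension

The same adjustment is repeated in the ARM VIXL and MIPS HandleFieldGet hunks below.
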
@@ -3011,7 +3013,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl::aarch64::Label non_zero;
@@ -3126,7 +3128,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
BoundsCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
@@ -3143,7 +3145,7 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3500,7 +3502,7 @@ void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -4055,8 +4057,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -4087,8 +4089,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -4176,8 +4178,8 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARM64* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl::aarch64::Label done;
@@ -4685,8 +4687,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4694,8 +4695,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -5010,7 +5010,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5150,7 +5150,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5391,8 +5391,7 @@ void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
+ SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) NullCheckSlowPathARM64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -6034,7 +6033,7 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6293,7 +6292,7 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
instruction,
ref,
obj,
@@ -6351,7 +6350,7 @@ void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction*
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
instruction,
ref,
obj,
@@ -6478,7 +6477,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeARM64* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6514,7 +6513,7 @@ void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instructio
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -6560,17 +6559,13 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
vixl::aarch64::Literal<uint32_t>* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 251f390ce3..90f3ae8a01 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4733,7 +4733,7 @@ void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5741,17 +5741,18 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
- DataType::Type field_type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type load_type = instruction->GetType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- switch (field_type) {
+ switch (load_type) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32: {
- LoadOperandType operand_type = GetLoadOperandType(field_type);
+ LoadOperandType operand_type = GetLoadOperandType(load_type);
GetAssembler()->LoadFromOffset(operand_type, RegisterFrom(out), base, offset);
break;
}
@@ -5811,11 +5812,11 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
}
case DataType::Type::kVoid:
- LOG(FATAL) << "Unreachable type " << field_type;
+ LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
}
- if (field_type == DataType::Type::kReference || field_type == DataType::Type::kFloat64) {
+ if (load_type == DataType::Type::kReference || load_type == DataType::Type::kFloat64) {
// Potential implicit null checks, in the case of reference or
// double fields, are handled in the previous switch statement.
} else {
@@ -5829,7 +5830,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
}
if (is_volatile) {
- if (field_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference) {
// Memory barriers, in the case of references, are also handled
// in the previous switch statement.
} else {
@@ -5959,7 +5960,7 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
NullCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
+ new (GetScopedAllocator()) NullCheckSlowPathARMVIXL(instruction);
AddSlowPath(slow_path);
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
}
@@ -5977,13 +5978,13 @@ void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(DataType::Type type,
MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
switch (type) {
+ case DataType::Type::kBool:
case DataType::Type::kUint8:
+ __ Ldrb(cond, RegisterFrom(out_loc), mem_address);
+ break;
case DataType::Type::kInt8:
__ Ldrsb(cond, RegisterFrom(out_loc), mem_address);
break;
- case DataType::Type::kBool:
- __ Ldrb(cond, RegisterFrom(out_loc), mem_address);
- break;
case DataType::Type::kUint16:
__ Ldrh(cond, RegisterFrom(out_loc), mem_address);
break;
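
This reorder matters for the newly distinguished kUint8: previously kUint8 shared the sign-extending Ldrsb case with kInt8, while only kBool used Ldrb. For unsigned byte loads the difference is observable; a sketch of the two encodings used above:

    // For a byte 0xFF at `mem_address`:
    //   Ldrb  r0, [mem]   ->  r0 = 0x000000FF   // zero-extend: correct for kBool/kUint8
    //   Ldrsb r0, [mem]   ->  r0 = 0xFFFFFFFF   // sign-extend: correct only for kInt8
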
@@ -6432,7 +6433,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl32::Label non_zero;
@@ -6693,7 +6694,7 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
int32_t index = Int32ConstantFrom(index_loc);
if (index < 0 || index >= length) {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
} else {
@@ -6704,13 +6705,13 @@ void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction
}
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(index_loc), length);
codegen_->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
} else {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
codegen_->AddSlowPath(slow_path);
__ B(ls, slow_path->GetEntryLabel());
@@ -6777,7 +6778,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru
down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -7214,8 +7215,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ LoadClassSlowPathARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -7241,10 +7243,10 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7354,7 +7356,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7681,8 +7683,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7710,8 +7712,8 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7789,8 +7791,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARMVIXL* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
@@ -8451,7 +8453,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8700,7 +8702,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
AddSlowPath(slow_path);
@@ -8746,8 +8748,8 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
- LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+ SlowPathCodeARMVIXL* slow_path =
+ new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
instruction,
ref,
obj,
@@ -8858,7 +8860,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -8894,7 +8896,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -9108,8 +9110,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
@@ -9120,8 +9121,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
@@ -9401,17 +9401,13 @@ void CodeGeneratorARMVIXL::EmitJitRootPatches(uint8_t* code, const uint8_t* root
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
VIXLUInt32Literal* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
VIXLUInt32Literal* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e58f43e1bb..2f65e8c958 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -33,6 +33,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
+#include "stack_map_stream.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
@@ -1128,12 +1129,13 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
__ FinalizeCode();
// Adjust native pc offsets in stack maps.
- for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
- stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ stack_map_stream->SetStackMapNativePcOffset(i, new_position);
}
// Adjust pc offsets for the disassembly information.
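
The remapping loop above is needed because the MIPS assembler may expand branches into longer sequences during FinalizeCode(), so a native PC recorded while emitting code can move; GetAdjustedPosition() translates the pre-finalization offset into the final one (this is the stated reason for DCHECK_GE). A condensed view of the loop body, now going through the GetStackMapStream() accessor (the same pattern is repeated for MIPS64 further down):

    uint32_t old_pc = stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
    uint32_t new_pc = __ GetAdjustedPosition(old_pc);  // never smaller than old_pc
    stack_map_stream->SetStackMapNativePcOffset(i, new_pc);
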
@@ -1298,7 +1300,7 @@ void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot
// automatically unspilled when the scratch scope object is destroyed).
ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
// If V0 spills onto the stack, SP-relative offsets need to be adjusted.
- int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ int stack_offset = ensure_scratch.IsSpilled() ? kStackAlignment : 0;
for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
__ LoadFromOffset(kLoadWord,
Register(ensure_scratch.GetRegister()),
@@ -1788,21 +1790,19 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
return &jit_string_patches_.back();
}
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
return &jit_class_patches_.back();
}
@@ -1834,17 +1834,13 @@ void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const JitPatchInfo& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(StringReference(&info.target_dex_file,
- dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const JitPatchInfo& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
- dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
@@ -1998,7 +1994,7 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2986,7 +2982,7 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
MipsLabel non_zero;
@@ -3171,7 +3167,7 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -3263,8 +3259,8 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -3427,7 +3423,7 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3884,7 +3880,7 @@ void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
DataType::Type type = instruction->GetType();
@@ -6205,7 +6201,8 @@ void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const Field
void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info,
uint32_t dex_pc) {
- DataType::Type type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type type = instruction->GetType();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
@@ -6247,8 +6244,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == DataType::Type::kFloat64) {
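
Replacing the bare Lw plus RecordPcInfo with LoadFromOffset(..., null_checker) delegates the implicit-null-check bookkeeping to the load helper. `null_checker` here is presumably the usual MIPS lambda created earlier in this function, roughly:

    auto null_checker = [codegen_, instruction]() {
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    };

so the PC is recorded on the instruction that can actually fault, even if LoadFromOffset has to emit a multi-instruction sequence for a large offset. The next hunk applies the same change to HandleFieldSet.
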
@@ -6401,8 +6401,11 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
InvokeRuntimeCallingConvention calling_convention;
__ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
// Do implicit Null check.
- __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ locations->GetTemp(0).AsRegister<Register>(),
+ 0,
+ null_checker);
if (type == DataType::Type::kFloat64) {
// Pass FP parameters in core registers.
if (value_location.IsFpuRegister()) {
@@ -6692,7 +6695,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
instruction,
root,
/*entrypoint*/ temp);
@@ -7019,14 +7022,14 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
// to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetAllocator())
+ slow_path = new (GetScopedAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
}
AddSlowPath(slow_path);
@@ -7062,7 +7065,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7098,7 +7101,7 @@ void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -7268,8 +7271,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7297,8 +7300,8 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7677,7 +7680,9 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
break;
}
if (has_irreducible_loops) {
- codegen_->ClobberRA();
+ if (load_kind != HLoadClass::LoadKind::kBootImageAddress) {
+ codegen_->ClobberRA();
+ }
break;
}
FALLTHROUGH_INTENDED;
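
The extra condition reflects that HLoadClass::LoadKind::kBootImageAddress does not need a PC-relative base at all: the class is presumably materialized as an absolute 32-bit constant, roughly

    __ LoadConst32(out, address);  // hedged sketch; `address` = boot-image address of the class

so no NAL-based addressing is involved and RA does not have to be clobbered, even when irreducible loops rule out an HMipsComputeBaseMethodAddress input. The matching change for HLoadString::LoadKind::kBootImageAddress follows below.
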
@@ -7841,7 +7846,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7894,7 +7899,9 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
break;
}
if (has_irreducible_loops) {
- codegen_->ClobberRA();
+ if (load_kind != HLoadString::LoadKind::kBootImageAddress) {
+ codegen_->ClobberRA();
+ }
break;
}
FALLTHROUGH_INTENDED;
@@ -8006,7 +8013,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -8333,7 +8340,7 @@ void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -9026,6 +9033,15 @@ void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ if (!codegen_->GetInstructionSetFeatures().IsR6()) {
+ uint32_t num_entries = switch_instr->GetNumEntries();
+ if (num_entries > InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
+ // When there's no HMipsComputeBaseMethodAddress input, R2 uses the NAL
+ // instruction to simulate PC-relative addressing when accessing the jump table.
+ // NAL clobbers RA. Make sure RA is preserved.
+ codegen_->ClobberRA();
+ }
+ }
}
void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
@@ -9109,13 +9125,17 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
HBasicBlock* switch_block = switch_instr->GetBlock();
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
- if (codegen_->GetInstructionSetFeatures().IsR6() &&
- num_entries > kPackedSwitchJumpTableThreshold) {
+ if (num_entries > kPackedSwitchJumpTableThreshold) {
// R6 uses PC-relative addressing to access the jump table.
- // R2, OTOH, requires an HMipsComputeBaseMethodAddress input to access
- // the jump table and it is implemented by changing HPackedSwitch to
- // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress.
- // See VisitMipsPackedSwitch() for the table-based implementation on R2.
+ //
+ // R2, OTOH, uses an HMipsComputeBaseMethodAddress input (when available)
+ // to access the jump table and it is implemented by changing HPackedSwitch to
+ // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress (see
+ // VisitMipsPackedSwitch()).
+ //
+ // When there's no HMipsComputeBaseMethodAddress input (e.g. in presence of
+ // irreducible loops), R2 uses the NAL instruction to simulate PC-relative
+ // addressing.
GenTableBasedPackedSwitch(value_reg,
ZERO,
lower_bound,
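
For background on the comment above: when R2 code cannot rely on an HMipsComputeBaseMethodAddress input (for example in the presence of irreducible loops), the assembler obtains a PC-relative base by using NAL, which branches nowhere but deposits the return address in RA. A hedged sketch of the resulting sequence:

    // nal                      # RA <- PC of the instruction after the delay slot
    // (delay slot)
    // ... address arithmetic using RA as the base to reach the jump table ...
    //
    // Because RA is overwritten, LocationsBuilderMIPS::VisitPackedSwitch (hunk
    // above) conservatively calls codegen_->ClobberRA() on R2 whenever the
    // table-based lowering may be used.
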
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5f2f90004d..7845e312cb 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -662,10 +662,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
const JitPatchInfo& info,
uint64_t index_in_table) const;
JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
private:
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 11120cf07a..6cbfa14f15 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -31,6 +31,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
+#include "stack_map_stream.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips64/assembler_mips64.h"
@@ -1072,12 +1073,13 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
__ FinalizeCode();
// Adjust native pc offsets in stack maps.
- for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ StackMapStream* stack_map_stream = GetStackMapStream();
+ for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
- stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ stack_map_stream->SetStackMapNativePcOffset(i, new_position);
}
// Adjust pc offsets for the disassembly information.
@@ -1681,8 +1683,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1691,8 +1692,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil
Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -1712,17 +1712,13 @@ void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots
for (const auto& entry : jit_string_patches_) {
const StringReference& string_reference = entry.first;
Literal* table_entry_literal = entry.second;
- const auto it = jit_string_roots_.find(string_reference);
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
for (const auto& entry : jit_class_patches_) {
const TypeReference& type_reference = entry.first;
Literal* table_entry_literal = entry.second;
- const auto it = jit_class_roots_.find(type_reference);
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
}
}
@@ -1835,7 +1831,7 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2543,7 +2539,7 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
SlowPathCodeMIPS64* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
Mips64Label non_zero;
@@ -2700,7 +2696,7 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2792,8 +2788,8 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -2956,7 +2952,7 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3430,7 +3426,7 @@ void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -4674,7 +4670,8 @@ void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DataType::Type type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type type = instruction->GetType();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
@@ -5050,7 +5047,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
+ new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(
instruction,
root,
/*entrypoint*/ temp);
@@ -5335,14 +5332,14 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
// above are expected to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetAllocator())
+ slow_path = new (GetScopedAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
}
AddSlowPath(slow_path);
@@ -5378,7 +5375,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCodeMIPS64* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -5414,7 +5411,7 @@ void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
@@ -5584,8 +5581,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5613,8 +5610,8 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -6082,7 +6079,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6200,7 +6197,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -6464,7 +6461,7 @@ void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
+ new (GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
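The hunks above all make the same mechanical change: slow-path objects are now placement-new'd out of the code generator's scoped arena allocator (GetScopedAllocator()) rather than the graph-lifetime arena. Below is a minimal sketch of that placement-new-on-arena idiom; the types are toys invented for illustration, not ART's real ScopedArenaAllocator or SlowPathCode classes.

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy bump allocator standing in for a scoped arena: allocations are freed en
// masse when the arena goes away, so slow paths never need individual deletes.
class ToyArena {
 public:
  explicit ToyArena(std::size_t capacity) : storage_(capacity), pos_(0) {}
  void* Alloc(std::size_t bytes) {
    // Keep subsequent allocations naturally aligned.
    bytes = (bytes + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
    void* p = storage_.data() + pos_;
    pos_ += bytes;
    return p;
  }
 private:
  std::vector<uint8_t> storage_;
  std::size_t pos_;
};

// Placement operator new, enabling `new (arena) T(...)`.
void* operator new(std::size_t bytes, ToyArena& arena) { return arena.Alloc(bytes); }

struct ToySlowPath {
  explicit ToySlowPath(int dex_pc) : dex_pc_(dex_pc) {}
  int dex_pc_;
};

int main() {
  ToyArena arena(1024);
  // Mirrors `new (codegen_->GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction)`.
  ToySlowPath* path = new (arena) ToySlowPath(/* dex_pc */ 42);
  return path->dex_pc_ == 42 ? 0 : 1;
}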
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 10aced02c3..152a59c208 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -1041,7 +1041,8 @@ void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction
LocationSummary* locations = instruction->GetLocations();
HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
- DCHECK_EQ(a->GetPackedType(), b->GetPackedType());
+ DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+ HVecOperation::ToSignedType(b->GetPackedType()));
switch (a->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
@@ -1087,7 +1088,8 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins
// Handle all feasible acc_T += sad(a_S, b_S) type combinations (T x S).
HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
- DCHECK_EQ(a->GetPackedType(), b->GetPackedType());
+ DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+ HVecOperation::ToSignedType(b->GetPackedType()));
switch (a->GetPackedType()) {
case DataType::Type::kUint8:
case DataType::Type::kInt8:
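The relaxed DCHECKs above compare the SAD operands' packed types after signed normalization: once other parts of this change start producing Uint8/Uint16 loads, one operand may carry the unsigned flavor of a lane type while the other carries the signed one, yet both describe the same lane width. A toy stand-in for that check (invented names, not ART's HVecOperation::ToSignedType):

#include <cassert>

enum class PackedType { kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

// Fuse an unsigned lane type with its signed counterpart before comparing.
PackedType ToSigned(PackedType t) {
  switch (t) {
    case PackedType::kUint8:  return PackedType::kInt8;
    case PackedType::kUint16: return PackedType::kInt16;
    default:                  return t;
  }
}

int main() {
  PackedType a = PackedType::kUint8;
  PackedType b = PackedType::kInt8;
  // A strict `a == b` check would fail here even though both have 8-bit lanes.
  assert(ToSigned(a) == ToSigned(b));
  return 0;
}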
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index c5a39ff882..7a8c0ad025 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -91,17 +91,61 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar*
}
void LocationsBuilderMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Copy_sW(locations->Out().AsRegister<Register>(), src, 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Mfc1(locations->Out().AsRegisterPairLow<Register>(),
+ locations->InAt(0).AsFpuRegister<FRegister>());
+ __ MoveFromFpuHigh(locations->Out().AsRegisterPairHigh<Register>(),
+ locations->InAt(0).AsFpuRegister<FRegister>());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 4u);
+ DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector unary operations.
static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
LocationSummary* locations = new (allocator) LocationSummary(instruction);
- switch (instruction->GetPackedType()) {
+ DataType::Type type = instruction->GetPackedType();
+ switch (type) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
@@ -118,7 +162,8 @@ static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation
case DataType::Type::kFloat64:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
- (instruction->IsVecNeg() || instruction->IsVecAbs())
+ (instruction->IsVecNeg() || instruction->IsVecAbs() ||
+ (instruction->IsVecReduce() && type == DataType::Type::kInt64))
? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
@@ -133,7 +178,54 @@ void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ VectorRegister tmp = static_cast<VectorRegister>(FTMP);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ Hadd_sD(tmp, src, src);
+ __ IlvlD(dst, tmp, tmp);
+ __ AddvW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMin:
+ __ IlvodW(tmp, src, src);
+ __ Min_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Min_sW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMax:
+ __ IlvodW(tmp, src, src);
+ __ Max_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Max_sW(dst, dst, tmp);
+ break;
+ }
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ IlvlD(dst, src, src);
+ __ AddvD(dst, dst, src);
+ break;
+ case HVecReduce::kMin:
+ __ IlvlD(dst, src, src);
+ __ Min_sD(dst, dst, src);
+ break;
+ case HVecReduce::kMax:
+ __ IlvlD(dst, src, src);
+ __ Max_sD(dst, dst, src);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
@@ -831,11 +923,79 @@ void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ HInstruction* input = instruction->InputAt(0);
+ bool is_zero = IsZeroBitPattern(input);
+
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ // Zero out all other elements first.
+ __ FillW(dst, ZERO);
+
+ // Shorthand for any type of zero.
+ if (IsZeroBitPattern(instruction->InputAt(0))) {
+ return;
+ }
+
+ // Set required elements.
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ InsertB(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ InsertH(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ InsertW(dst, locations->InAt(0).AsRegister<Register>(), 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Mtc1(locations->InAt(0).AsRegisterPairLow<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ __ MoveToFpuHigh(locations->InAt(0).AsRegisterPairHigh<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector accumulations.
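The MSA sequences in VisitVecReduce above implement a lane-halving reduction: interleave the upper lanes onto the lower lanes, combine, and repeat until lane 0 holds the result (the 4 x int32 kSum case additionally widens adjacent pairs to 64 bits via Hadd_sD first). A purely illustrative scalar sketch of the same folding idea:

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

// Fold half of the lanes onto the other half at each step; the Ilvl*/Ilvod*
// interleaves in the code above play exactly this "bring the other half down" role.
int32_t ReduceSum(const std::array<int32_t, 4>& v) {
  int32_t lo = v[0] + v[2];
  int32_t hi = v[1] + v[3];
  return lo + hi;
}

int32_t ReduceMin(const std::array<int32_t, 4>& v) {
  return std::min(std::min(v[0], v[2]), std::min(v[1], v[3]));
}

int main() {
  std::array<int32_t, 4> v = {5, -3, 9, 2};
  assert(ReduceSum(v) == 13);
  assert(ReduceMin(v) == -3);
  return 0;
}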
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index e606df2158..0c59b7344a 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -94,17 +94,58 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar
}
void LocationsBuilderMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Copy_sW(locations->Out().AsRegister<GpuRegister>(), src, 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Copy_sD(locations->Out().AsRegister<GpuRegister>(), src, 0);
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ DCHECK_LE(2u, instruction->GetVectorLength());
+ DCHECK_LE(instruction->GetVectorLength(), 4u);
+ DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector unary operations.
static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
LocationSummary* locations = new (allocator) LocationSummary(instruction);
- switch (instruction->GetPackedType()) {
+ DataType::Type type = instruction->GetPackedType();
+ switch (type) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
@@ -121,7 +162,8 @@ static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation
case DataType::Type::kFloat64:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(),
- (instruction->IsVecNeg() || instruction->IsVecAbs())
+ (instruction->IsVecNeg() || instruction->IsVecAbs() ||
+ (instruction->IsVecReduce() && type == DataType::Type::kInt64))
? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
@@ -136,7 +178,54 @@ void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister src = VectorRegisterFrom(locations->InAt(0));
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+ VectorRegister tmp = static_cast<VectorRegister>(FTMP);
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ Hadd_sD(tmp, src, src);
+ __ IlvlD(dst, tmp, tmp);
+ __ AddvW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMin:
+ __ IlvodW(tmp, src, src);
+ __ Min_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Min_sW(dst, dst, tmp);
+ break;
+ case HVecReduce::kMax:
+ __ IlvodW(tmp, src, src);
+ __ Max_sW(tmp, src, tmp);
+ __ IlvlW(dst, tmp, tmp);
+ __ Max_sW(dst, dst, tmp);
+ break;
+ }
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ switch (instruction->GetKind()) {
+ case HVecReduce::kSum:
+ __ IlvlD(dst, src, src);
+ __ AddvD(dst, dst, src);
+ break;
+ case HVecReduce::kMin:
+ __ IlvlD(dst, src, src);
+ __ Min_sD(dst, dst, src);
+ break;
+ case HVecReduce::kMax:
+ __ IlvlD(dst, src, src);
+ __ Max_sD(dst, dst, src);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
@@ -835,11 +924,76 @@ void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
}
void LocationsBuilderMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ HInstruction* input = instruction->InputAt(0);
+ bool is_zero = IsZeroBitPattern(input);
+
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
+ : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VectorRegister dst = VectorRegisterFrom(locations->Out());
+
+ DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
+
+ // Zero out all other elements first.
+ __ FillW(dst, ZERO);
+
+ // Shorthand for any type of zero.
+ if (IsZeroBitPattern(instruction->InputAt(0))) {
+ return;
+ }
+
+ // Set required elements.
+ switch (instruction->GetPackedType()) {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ InsertB(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ InsertH(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kInt32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ InsertW(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ InsertD(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
// Helper to set up locations for vector accumulations.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 39a07b82d1..44614e1630 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -144,7 +144,8 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
InvokeRuntimeCallingConvention calling_convention;
if (array_length->IsArrayLength() && array_length->IsEmittedAtUseSite()) {
// Load the array length into our temporary.
- uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(array_length->AsArrayLength());
+ HArrayLength* length = array_length->AsArrayLength();
+ uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(length);
Location array_loc = array_length->GetLocations()->InAt(0);
Address array_len(array_loc.AsRegister<Register>(), len_offset);
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(1));
@@ -154,7 +155,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2));
}
__ movl(length_loc.AsRegister<Register>(), array_len);
- if (mirror::kUseStringCompression) {
+ if (mirror::kUseStringCompression && length->IsStringLength()) {
__ shrl(length_loc.AsRegister<Register>(), Immediate(1));
}
}
@@ -3581,7 +3582,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
GenerateDivRemWithAnyConstant(instruction);
}
} else {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86(
instruction, out.AsRegister<Register>(), is_div);
codegen_->AddSlowPath(slow_path);
@@ -3817,7 +3818,8 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4756,10 +4758,11 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
Register base = base_loc.AsRegister<Register>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
- DataType::Type field_type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type load_type = instruction->GetType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- switch (field_type) {
+ switch (load_type) {
case DataType::Type::kBool:
case DataType::Type::kUint8: {
__ movzxb(out.AsRegister<Register>(), Address(base, offset));
@@ -4837,11 +4840,11 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
}
case DataType::Type::kVoid:
- LOG(FATAL) << "Unreachable type " << field_type;
+ LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
}
- if (field_type == DataType::Type::kReference || field_type == DataType::Type::kInt64) {
+ if (load_type == DataType::Type::kReference || load_type == DataType::Type::kInt64) {
// Potential implicit null checks, in the case of reference or
// long fields, are handled in the previous switch statement.
} else {
@@ -4849,7 +4852,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
}
if (is_volatile) {
- if (field_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference) {
// Memory barriers, in the case of references, are also handled
// in the previous switch statement.
} else {
@@ -5149,7 +5152,7 @@ void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5427,7 +5430,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5618,7 +5621,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5719,7 +5722,8 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio
SuspendCheckSlowPathX86* slow_path =
down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -6076,12 +6080,11 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
}
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
- reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
// Add a patch entry and return the label.
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
@@ -6171,7 +6174,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -6199,7 +6202,7 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -6261,12 +6264,11 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(
- StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -6306,7 +6308,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -6587,8 +6589,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6619,8 +6621,8 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6712,8 +6714,8 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
@@ -7156,7 +7158,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -7286,10 +7288,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
SlowPathCode* slow_path;
if (always_update_field) {
DCHECK(temp != nullptr);
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -7322,7 +7324,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCode* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7358,7 +7360,7 @@ void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7810,18 +7812,14 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(
- StringReference(&info.dex_file, dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const PatchInfo<Label>& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(
- TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
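The hunk above (and its x86-64 counterpart later in this change) stops poking at jit_string_roots_/jit_class_roots_ directly and instead goes through ReserveJit*Root/GetJit*RootIndex helpers, presumably added to the shared CodeGenerator code that this commit also modifies. A toy reserve/lookup pair with invented names, just to show the shape of that API:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

class ToyRootTable {
 public:
  // Reserve a slot for a root; the recorded index is later patched into code.
  void Reserve(const std::string& key) { roots_.emplace(key, next_index_++); }

  uint64_t GetIndex(const std::string& key) const {
    auto it = roots_.find(key);
    assert(it != roots_.end());  // every patch must have been reserved first
    return it->second;
  }

 private:
  std::map<std::string, uint64_t> roots_;
  uint64_t next_index_ = 0;
};

int main() {
  ToyRootTable table;
  table.Reserve("Ljava/lang/String;");
  assert(table.GetIndex("Ljava/lang/String;") == 0);
  return 0;
}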
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fb61e75d73..176e4dfda0 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -422,10 +422,10 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c8032c25df..259bb4a9a9 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -195,7 +195,8 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
InvokeRuntimeCallingConvention calling_convention;
if (array_length->IsArrayLength() && array_length->IsEmittedAtUseSite()) {
// Load the array length into our temporary.
- uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(array_length->AsArrayLength());
+ HArrayLength* length = array_length->AsArrayLength();
+ uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(length);
Location array_loc = array_length->GetLocations()->InAt(0);
Address array_len(array_loc.AsRegister<CpuRegister>(), len_offset);
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(1));
@@ -205,7 +206,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2));
}
__ movl(length_loc.AsRegister<CpuRegister>(), array_len);
- if (mirror::kUseStringCompression) {
+ if (mirror::kUseStringCompression && length->IsStringLength()) {
__ shrl(length_loc.AsRegister<CpuRegister>(), Immediate(1));
}
}
@@ -3649,7 +3650,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
}
} else {
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
+ new (codegen_->GetScopedAllocator()) DivRemMinusOneSlowPathX86_64(
instruction, out.AsRegister(), type, is_div);
codegen_->AddSlowPath(slow_path);
@@ -3818,7 +3819,7 @@ void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
+ new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4230,10 +4231,11 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
CpuRegister base = base_loc.AsRegister<CpuRegister>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
- DataType::Type field_type = field_info.GetFieldType();
+ DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
+ DataType::Type load_type = instruction->GetType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- switch (field_type) {
+ switch (load_type) {
case DataType::Type::kBool:
case DataType::Type::kUint8: {
__ movzxb(out.AsRegister<CpuRegister>(), Address(base, offset));
@@ -4300,11 +4302,11 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
}
case DataType::Type::kVoid:
- LOG(FATAL) << "Unreachable type " << field_type;
+ LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
}
- if (field_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference) {
// Potential implicit null checks, in the case of reference
// fields, are handled in the previous switch statement.
} else {
@@ -4312,7 +4314,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
}
if (is_volatile) {
- if (field_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference) {
// Memory barriers, in the case of references, are also handled
// in the previous switch statement.
} else {
@@ -4602,7 +4604,7 @@ void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetScopedAllocator()) NullCheckSlowPathX86_64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4864,7 +4866,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
+ slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5043,7 +5045,8 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5164,7 +5167,8 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
SuspendCheckSlowPathX86_64* slow_path =
down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -5468,12 +5472,11 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
}
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle) {
- jit_class_roots_.Overwrite(
- TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
// Add a patch entry and return the label.
- jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_class_patches_.emplace_back(dex_file, type_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
@@ -5561,7 +5564,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5587,7 +5590,7 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5634,12 +5637,11 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle) {
- jit_string_roots_.Overwrite(
- StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
+ ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
// Add a patch entry and return the label.
- jit_string_patches_.emplace_back(dex_file, dex_index.index_);
+ jit_string_patches_.emplace_back(dex_file, string_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
@@ -5677,7 +5679,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -5967,8 +5969,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -5999,8 +6001,8 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6094,8 +6096,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
- is_type_check_slow_path_fatal);
+ new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
+ instruction, is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6520,7 +6522,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -6652,10 +6654,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
if (always_update_field) {
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
} else {
- slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
+ slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -6688,7 +6690,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
+ SlowPathCode* slow_path = new (GetScopedAllocator())
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6724,7 +6726,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instructi
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ new (GetScopedAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7113,18 +7115,14 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto it = jit_string_roots_.find(
- StringReference(&info.dex_file, dex::StringIndex(info.index)));
- DCHECK(it != jit_string_roots_.end());
- uint64_t index_in_table = it->second;
+ StringReference string_reference(&info.dex_file, dex::StringIndex(info.index));
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
for (const PatchInfo<Label>& info : jit_class_patches_) {
- const auto it = jit_class_roots_.find(
- TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
- DCHECK(it != jit_class_roots_.end());
- uint64_t index_in_table = it->second;
+ TypeReference type_reference(&info.dex_file, dex::TypeIndex(info.index));
+ uint64_t index_in_table = GetJitClassRootIndex(type_reference);
PatchJitRootUse(code, roots_data, info, index_in_table);
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6f67a45f25..00c5c27470 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -416,10 +416,10 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordBootStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index,
+ dex::StringIndex string_index,
Handle<mirror::String> handle);
Label* NewJitRootClassPatch(const DexFile& dex_file,
- dex::TypeIndex dex_index,
+ dex::TypeIndex type_index,
Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 3b67efe100..75a7fbe6ca 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -123,7 +123,7 @@ class DataType {
}
static bool IsUnsignedType(Type type) {
- return type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
diff --git a/compiler/optimizing/data_type_test.cc b/compiler/optimizing/data_type_test.cc
index 3ce683ac4d..ca137b7c7c 100644
--- a/compiler/optimizing/data_type_test.cc
+++ b/compiler/optimizing/data_type_test.cc
@@ -75,7 +75,7 @@ TEST(DataType, IsTypeConversionImplicit) {
const ArrayRef<const DataType::Type> kIntegralResultTypes = kIntegralInputTypes.SubArray(1u);
static const bool kImplicitIntegralConversions[][arraysize(kIntegralTypes)] = {
- // Bool Uint8 Int8 Uint16 Int16 Int32 Int64
+ // Bool Uint8 Int8 Uint16 Int16 Int32 Int64
{ /* Bool N/A */ true, true, true, true, true, false },
{ /* Uint8 N/A */ true, false, true, true, true, false },
{ /* Int8 N/A */ false, true, false, true, true, false },
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 36e932c67a..b63914faf7 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -238,14 +238,14 @@ TEST_F(EmitSwapMipsTest, TwoStackSlots) {
DataType::Type::kInt32,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 56($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 56($sp)\n"
+ "lw $v0, 68($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 68($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoStackSlots");
}
@@ -261,18 +261,18 @@ TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) {
DataType::Type::kInt64,
nullptr);
const char* expected =
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $v0, 0($sp)\n"
- "lw $v0, 60($sp)\n"
- "lw $t8, 52($sp)\n"
- "sw $v0, 52($sp)\n"
- "sw $t8, 60($sp)\n"
- "lw $v0, 64($sp)\n"
- "lw $t8, 56($sp)\n"
- "sw $v0, 56($sp)\n"
- "sw $t8, 64($sp)\n"
+ "lw $v0, 72($sp)\n"
+ "lw $t8, 64($sp)\n"
+ "sw $v0, 64($sp)\n"
+ "sw $t8, 72($sp)\n"
+ "lw $v0, 76($sp)\n"
+ "lw $t8, 68($sp)\n"
+ "sw $v0, 68($sp)\n"
+ "sw $t8, 76($sp)\n"
"lw $v0, 0($sp)\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
DriverWrapper(moves_, expected, "TwoDoubleStackSlots");
}
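Both swap tests above now reserve 16 bytes of scratch space instead of 4 (presumably so that $sp stays 16-byte aligned while the temporary is spilled), which pushes every pre-existing stack-slot offset up by 12. A quick arithmetic check against the updated expected strings:

#include <cassert>

int main() {
  constexpr int kOldReserve = 4;
  constexpr int kNewReserve = 16;
  constexpr int kShift = kNewReserve - kOldReserve;  // 12
  // TwoStackSlots: 56 -> 68, 52 -> 64.
  assert(56 + kShift == 68);
  assert(52 + kShift == 64);
  // TwoDoubleStackSlots: 60 -> 72, 64 -> 76, 56 -> 68.
  assert(60 + kShift == 72);
  assert(64 + kShift == 76);
  assert(56 + kShift == 68);
  return 0;
}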
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index afe748458e..189d5aea56 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1044,17 +1044,34 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
}
static bool IsTypeConversionLossless(DataType::Type input_type, DataType::Type result_type) {
+ // Make sure all implicit conversions have been simplified and no new ones have been introduced.
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << "," << result_type;
// The conversion to a larger type is loss-less with the exception of two cases,
// - conversion to the unsigned type Uint16, where we may lose some bits, and
// - conversion from float to long, the only FP to integral conversion with smaller FP type.
// For integral to FP conversions this holds because the FP mantissa is large enough.
// Note: The size check excludes Uint8 as the result type.
- DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type));
return DataType::Size(result_type) > DataType::Size(input_type) &&
result_type != DataType::Type::kUint16 &&
!(result_type == DataType::Type::kInt64 && input_type == DataType::Type::kFloat32);
}
+static inline bool TryReplaceFieldOrArrayGetType(HInstruction* maybe_get, DataType::Type new_type) {
+ if (maybe_get->IsInstanceFieldGet()) {
+ maybe_get->AsInstanceFieldGet()->SetType(new_type);
+ return true;
+ } else if (maybe_get->IsStaticFieldGet()) {
+ maybe_get->AsStaticFieldGet()->SetType(new_type);
+ return true;
+ } else if (maybe_get->IsArrayGet() && !maybe_get->AsArrayGet()->IsStringCharAt()) {
+ maybe_get->AsArrayGet()->SetType(new_type);
+ return true;
+ } else {
+ return false;
+ }
+}
+
void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruction) {
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
@@ -1130,6 +1147,18 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
}
}
}
+ } else if (input->HasOnlyOneNonEnvironmentUse() &&
+ ((input_type == DataType::Type::kInt8 && result_type == DataType::Type::kUint8) ||
+ (input_type == DataType::Type::kUint8 && result_type == DataType::Type::kInt8) ||
+ (input_type == DataType::Type::kInt16 && result_type == DataType::Type::kUint16) ||
+ (input_type == DataType::Type::kUint16 && result_type == DataType::Type::kInt16))) {
+ // Try to modify the type of the load to `result_type` and remove the explicit type conversion.
+ if (TryReplaceFieldOrArrayGetType(input, result_type)) {
+ instruction->ReplaceWith(input);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ RecordSimplification();
+ return;
+ }
}
}
@@ -1220,12 +1249,16 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
}
void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
+ DCHECK(DataType::IsIntegralType(instruction->GetType()));
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
if (input_cst != nullptr) {
int64_t value = Int64FromConstant(input_cst);
- if (value == -1) {
+ if (value == -1 ||
+ // Similar cases under zero extension.
+ (DataType::IsUnsignedType(input_other->GetType()) &&
+ ((DataType::MaxValueOfIntegralType(input_other->GetType()) & ~value) == 0))) {
// Replace code looking like
// AND dst, src, 0xFFF...FF
// with
@@ -1293,6 +1326,28 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) {
return;
}
}
+ if ((value == 0xff || value == 0xffff) && instruction->GetType() != DataType::Type::kInt64) {
+ // Transform AND to a type conversion to Uint8/Uint16. If `input_other` is a field
+ // or array Get with only a single use, short-circuit the subsequent simplification
+ // of the Get+TypeConversion and change the Get's type to `new_type` instead.
+ DataType::Type new_type = (value == 0xff) ? DataType::Type::kUint8 : DataType::Type::kUint16;
+ DataType::Type find_type = (value == 0xff) ? DataType::Type::kInt8 : DataType::Type::kInt16;
+ if (input_other->GetType() == find_type &&
+ input_other->HasOnlyOneNonEnvironmentUse() &&
+ TryReplaceFieldOrArrayGetType(input_other, new_type)) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (DataType::IsTypeConversionImplicit(input_other->GetType(), new_type)) {
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else {
+ HTypeConversion* type_conversion = new (GetGraph()->GetAllocator()) HTypeConversion(
+ new_type, input_other, instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, type_conversion);
+ }
+ RecordSimplification();
+ return;
+ }
}
// We assume that GVN has run before, so we only perform a pointer comparison.
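The new block above turns `x & 0xff` / `x & 0xffff` into a zero-extending type conversion to Uint8/Uint16, or folds the mask into a preceding field/array get. The identity it relies on, checked in plain C++ (illustrative only, not compiler IR):

#include <cassert>
#include <cstdint>

int main() {
  for (int x : {0, 1, 127, 128, 255, 256, -1, -128, 0x12345}) {
    int masked = x & 0xff;
    // Converting through an unsigned 8-bit type and widening back zero-extends,
    // producing the same value as the mask.
    int via_conversion = static_cast<uint8_t>(x);
    assert(masked == via_conversion);
  }
  return 0;
}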
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ee07c4f65c..ef85f9ccc4 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1339,7 +1339,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
SlowPathCodeARM64* slow_path = nullptr;
const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
if (can_slow_path) {
- slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ Cbz(arg, slow_path->GetEntryLabel());
}
@@ -1702,7 +1702,6 @@ void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) {
static void GenerateVisitStringIndexOf(HInvoke* invoke,
MacroAssembler* masm,
CodeGeneratorARM64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1717,7 +1716,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) > 0xFFFFU) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1726,7 +1725,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
} else if (code_point->GetType() != DataType::Type::kUint16) {
Register char_reg = WRegisterFrom(locations->InAt(1));
__ Tst(char_reg, 0xFFFF0000);
- slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
}
@@ -1760,8 +1759,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1777,8 +1775,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetVIXLAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1798,7 +1795,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke)
Register byte_array = WRegisterFrom(locations->InAt(0));
__ Cmp(byte_array, 0);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -1842,7 +1840,8 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke
Register string_to_copy = WRegisterFrom(locations->InAt(0));
__ Cmp(string_to_copy, 0);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2285,7 +2284,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
Location dst_pos = locations->InAt(3);
Location length = locations->InAt(4);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
// If source and destination are the same, take the slow path. Overlapping copy regions must be
@@ -2462,7 +2462,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
Register temp2 = WRegisterFrom(locations->GetTemp(1));
Location temp2_loc = LocationFrom(temp2);
- SlowPathCodeARM64* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ SlowPathCodeARM64* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
vixl::aarch64::Label conditions_on_positions_validated;
@@ -2839,7 +2840,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCodeARM64* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp));
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(
+ invoke, LocationFrom(tmp));
codegen_->AddSlowPath(read_barrier_slow_path);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
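Editor's note: this and the following intrinsics files all make the same change — slow paths are placement-new'd on the code generator's scoped arena allocator instead of the graph's arena, and the now-unused allocator parameter is dropped. A hedged standalone sketch of the placement-new-on-an-arena pattern; BumpArena and SlowPath are made-up types for illustration, not ART's ScopedArenaAllocator or SlowPathCode:

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

class BumpArena {
 public:
  explicit BumpArena(std::size_t capacity) : buffer_(capacity), used_(0) {}
  void* Alloc(std::size_t bytes) {
    constexpr std::size_t kAlign = alignof(std::max_align_t);
    std::size_t offset = (used_ + kAlign - 1) & ~(kAlign - 1);
    if (offset + bytes > buffer_.size()) throw std::bad_alloc();
    used_ = offset + bytes;
    return buffer_.data() + offset;  // carved out of one buffer, freed all at once
  }
 private:
  std::vector<uint8_t> buffer_;
  std::size_t used_;
};

// Enables `new (arena) T(...)`, mirroring how the intrinsic slow paths are allocated.
void* operator new(std::size_t bytes, BumpArena& arena) { return arena.Alloc(bytes); }

struct SlowPath {
  explicit SlowPath(int id) : id(id) {}
  int id;
};

int main() {
  BumpArena arena(1024);
  SlowPath* sp = new (arena) SlowPath(42);  // no matching delete; the arena owns the memory
  return sp->id == 42 ? 0 : 1;
}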
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 332306bebf..e0874d9549 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1490,7 +1490,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
SlowPathCodeARMVIXL* slow_path = nullptr;
const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
if (can_slow_path) {
- slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(arg, slow_path->GetEntryLabel());
}
@@ -1916,7 +1916,6 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
static void GenerateVisitStringIndexOf(HInvoke* invoke,
ArmVIXLAssembler* assembler,
CodeGeneratorARMVIXL* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1932,7 +1931,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1942,7 +1941,7 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke,
vixl32::Register char_reg = InputRegisterAt(invoke, 1);
// 0xffff is not modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
    // 0xffff is not a modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
__ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1);
- slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
}
@@ -1977,8 +1976,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1994,8 +1992,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2013,7 +2010,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invok
ArmVIXLAssembler* assembler = GetAssembler();
vixl32::Register byte_array = InputRegisterAt(invoke, 0);
__ Cmp(byte_array, 0);
- SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2055,7 +2053,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invo
ArmVIXLAssembler* assembler = GetAssembler();
vixl32::Register string_to_copy = InputRegisterAt(invoke, 0);
__ Cmp(string_to_copy, 0);
- SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
__ B(eq, slow_path->GetEntryLabel());
@@ -2190,7 +2189,8 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
Location temp3_loc = locations->GetTemp(2);
vixl32::Register temp3 = RegisterFrom(temp3_loc);
- SlowPathCodeARMVIXL* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ SlowPathCodeARMVIXL* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
vixl32::Label conditions_on_positions_validated;
@@ -2496,7 +2496,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// Note that the base destination address is computed in `temp2`
// by the slow path code.
SlowPathCodeARMVIXL* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// Given the numeric representation, it's enough to check the low bit of the
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5f2f71bd4d..4a8fbf26ce 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2053,7 +2053,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) {
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(argument, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
@@ -2185,8 +2185,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
static void GenerateStringIndexOf(HInvoke* invoke,
bool start_at_zero,
MipsAssembler* assembler,
- CodeGeneratorMIPS* codegen,
- ArenaAllocator* allocator) {
+ CodeGeneratorMIPS* codegen) {
LocationSummary* locations = invoke->GetLocations();
Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;
@@ -2202,7 +2201,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
// full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -2219,7 +2218,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
    // two halfwords so we fall back to using the generic implementation
// of indexOf().
__ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
- slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen->AddSlowPath(slow_path);
__ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
}
@@ -2253,11 +2252,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke,
- /* start_at_zero */ true,
- GetAssembler(),
- codegen_,
- GetAllocator());
+ GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -2278,11 +2273,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke,
- /* start_at_zero */ false,
- GetAssembler(),
- codegen_,
- GetAllocator());
+ GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2303,7 +2294,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke)
LocationSummary* locations = invoke->GetLocations();
Register byte_array = locations->InAt(0).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(byte_array, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
@@ -2347,7 +2338,7 @@ void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke)
LocationSummary* locations = invoke->GetLocations();
Register string_to_copy = locations->InAt(0).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqz(string_to_copy, slow_path->GetEntryLabel());
codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
@@ -3059,7 +3050,7 @@ void IntrinsicCodeGeneratorMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
Register src_base = locations->GetTemp(1).AsRegister<Register>();
Register count = locations->GetTemp(2).AsRegister<Register>();
- SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8d5be80202..512fb68fad 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1626,7 +1626,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(argument, slow_path->GetEntryLabel());
@@ -1754,7 +1755,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
static void GenerateStringIndexOf(HInvoke* invoke,
Mips64Assembler* assembler,
CodeGeneratorMIPS64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
@@ -1771,7 +1771,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
// full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1780,7 +1780,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
} else if (code_point->GetType() != DataType::Type::kUint16) {
GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
__ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
- slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen->AddSlowPath(slow_path);
__ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required
}
@@ -1816,7 +1816,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1834,8 +1834,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1856,7 +1855,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke
LocationSummary* locations = invoke->GetLocations();
GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(byte_array, slow_path->GetEntryLabel());
@@ -1903,7 +1903,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invok
LocationSummary* locations = invoke->GetLocations();
GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
__ Beqzc(string_to_copy, slow_path->GetEntryLabel());
@@ -2160,7 +2161,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
GpuRegister src_base = locations->GetTemp(1).AsRegister<GpuRegister>();
GpuRegister count = locations->GetTemp(2).AsRegister<GpuRegister>();
- SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8b389ba876..8a0b6aeb0e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1253,7 +1253,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopyChar(HInvoke* invoke) {
Register count = locations->GetTemp(2).AsRegister<Register>();
DCHECK_EQ(count, ECX);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same (to handle overlap).
@@ -1336,7 +1336,7 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
Register argument = locations->InAt(1).AsRegister<Register>();
__ testl(argument, argument);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1485,7 +1485,6 @@ static void CreateStringIndexOfLocations(HInvoke* invoke,
static void GenerateStringIndexOf(HInvoke* invoke,
X86Assembler* assembler,
CodeGeneratorX86* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1515,7 +1514,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1523,7 +1522,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
} else if (code_point->GetType() != DataType::Type::kUint16) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
- slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen->AddSlowPath(slow_path);
__ j(kAbove, slow_path->GetEntryLabel());
}
@@ -1640,7 +1639,7 @@ void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1648,8 +1647,7 @@ void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1669,7 +1667,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
Register byte_array = locations->InAt(0).AsRegister<Register>();
__ testl(byte_array, byte_array);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1713,7 +1711,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke)
Register string_to_copy = locations->InAt(0).AsRegister<Register>();
__ testl(string_to_copy, string_to_copy);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -2901,7 +2899,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
Location temp2_loc = locations->GetTemp(1);
Register temp2 = temp2_loc.AsRegister<Register>();
- SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ SlowPathCode* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
@@ -3215,7 +3214,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCode* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// We have done the "if" of the gray bit check above, now branch based on the flags.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6337900b71..92ffda427b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1033,7 +1033,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
CpuRegister count = locations->GetTemp(2).AsRegister<CpuRegister>();
DCHECK_EQ(count.AsRegister(), RCX);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
// Bail out if the source and destination are the same.
@@ -1175,7 +1175,8 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
CpuRegister temp3 = temp3_loc.AsRegister<CpuRegister>();
Location TMP_loc = Location::RegisterLocation(TMP);
- SlowPathCode* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* intrinsic_slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(intrinsic_slow_path);
NearLabel conditions_on_positions_validated;
@@ -1449,7 +1450,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// Slow path used to copy array when `src` is gray.
SlowPathCode* read_barrier_slow_path =
- new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
+ new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathX86_64(invoke);
codegen_->AddSlowPath(read_barrier_slow_path);
// We have done the "if" of the gray bit check above, now branch based on the flags.
@@ -1510,7 +1511,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
__ testl(argument, argument);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1655,7 +1656,6 @@ static void CreateStringIndexOfLocations(HInvoke* invoke,
static void GenerateStringIndexOf(HInvoke* invoke,
X86_64Assembler* assembler,
CodeGeneratorX86_64* codegen,
- ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
@@ -1683,7 +1683,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
- slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -1691,7 +1691,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
} else if (code_point->GetType() != DataType::Type::kUint16) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
- slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
__ j(kAbove, slow_path->GetEntryLabel());
}
@@ -1800,7 +1800,7 @@ void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1808,8 +1808,7 @@ void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(
- invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1829,7 +1828,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke
CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
__ testl(byte_array, byte_array);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -1873,7 +1872,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok
CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
__ testl(string_to_copy, string_to_copy);
- SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index af5585ec92..7dff696e32 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -39,13 +39,14 @@ static HInstruction* const kUnknownHeapValue =
static HInstruction* const kDefaultHeapValue =
reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-2));
-class LSEVisitor : public HGraphVisitor {
+// Use HGraphDelegateVisitor for which all VisitInvokeXXX() delegate to VisitInvoke().
+class LSEVisitor : public HGraphDelegateVisitor {
public:
LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_locations_collector,
const SideEffectsAnalysis& side_effects,
OptimizingCompilerStats* stats)
- : HGraphVisitor(graph, stats),
+ : HGraphDelegateVisitor(graph, stats),
heap_location_collector_(heap_locations_collector),
side_effects_(side_effects),
allocator_(graph->GetArenaStack()),
@@ -540,23 +541,7 @@ class LSEVisitor : public HGraphVisitor {
}
}
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ void VisitInvoke(HInvoke* invoke) OVERRIDE {
HandleInvoke(invoke);
}
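Editor's note: the LSE change above depends on the delegating-visitor dispatch, where every concrete VisitInvokeXXX() falls through to a single VisitInvoke() unless overridden. A standalone C++ sketch of that pattern; Visitor and HeapInvalidatingVisitor are illustrative names, not ART's HGraphDelegateVisitor or LSEVisitor:

#include <cassert>

class Visitor {
 public:
  virtual ~Visitor() = default;
  virtual void VisitInvoke() {}  // shared default handler
  // Each concrete invoke kind delegates to the shared handler by default.
  virtual void VisitInvokeStaticOrDirect() { VisitInvoke(); }
  virtual void VisitInvokeVirtual() { VisitInvoke(); }
  virtual void VisitInvokeInterface() { VisitInvoke(); }
};

class HeapInvalidatingVisitor : public Visitor {
 public:
  void VisitInvoke() override { ++invalidations; }  // one override covers all invoke kinds
  int invalidations = 0;
};

int main() {
  HeapInvalidatingVisitor v;
  v.VisitInvokeVirtual();
  v.VisitInvokeStaticOrDirect();
  assert(v.invalidations == 2);
  return 0;
}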
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 69c6b94c6b..8f84796ff4 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -123,7 +123,7 @@ static bool IsSignExtensionAndGet(HInstruction* instruction,
/*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by sign
// extension when represented in the *width* of the given narrower data type
- // (the fact that Uint16 normally zero extends does not matter here).
+ // (the fact that Uint8/Uint16 normally zero extend does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
@@ -221,31 +221,6 @@ static bool IsZeroExtensionAndGet(HInstruction* instruction,
return false;
}
}
- // A sign (or zero) extension followed by an explicit removal of just the
- // higher sign bits is equivalent to a zero extension of the underlying operand.
- //
- // TODO: move this into simplifier and use new type system instead.
- //
- if (instruction->IsAnd()) {
- int64_t mask = 0;
- HInstruction* a = instruction->InputAt(0);
- HInstruction* b = instruction->InputAt(1);
- // In (a & b) find (mask & b) or (a & mask) with sign or zero extension on the non-mask.
- if ((IsInt64AndGet(a, /*out*/ &mask) && (IsSignExtensionAndGet(b, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(b, type, /*out*/ operand))) ||
- (IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) ||
- IsZeroExtensionAndGet(a, type, /*out*/ operand)))) {
- switch ((*operand)->GetType()) {
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- return mask == std::numeric_limits<uint8_t>::max();
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return mask == std::numeric_limits<uint16_t>::max();
- default: return false;
- }
- }
- }
// An explicit widening conversion of an unsigned expression zero-extends.
if (instruction->IsTypeConversion()) {
HInstruction* conv = instruction->InputAt(0);
@@ -277,10 +252,15 @@ static bool IsNarrowerOperands(HInstruction* a,
/*out*/ HInstruction** r,
/*out*/ HInstruction** s,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r) && IsSignExtensionAndGet(b, type, s)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r) && IsZeroExtensionAndGet(b, type, s)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
*is_unsigned = true;
return true;
}
@@ -292,10 +272,15 @@ static bool IsNarrowerOperand(HInstruction* a,
DataType::Type type,
/*out*/ HInstruction** r,
/*out*/ bool* is_unsigned) {
- if (IsSignExtensionAndGet(a, type, r)) {
+ // Look for a matching sign extension.
+ DataType::Type stype = HVecOperation::ToSignedType(type);
+ if (IsSignExtensionAndGet(a, stype, r)) {
*is_unsigned = false;
return true;
- } else if (IsZeroExtensionAndGet(a, type, r)) {
+ }
+ // Look for a matching zero extension.
+ DataType::Type utype = HVecOperation::ToUnsignedType(type);
+ if (IsZeroExtensionAndGet(a, utype, r)) {
*is_unsigned = true;
return true;
}
@@ -1162,7 +1147,6 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
size_t size_vec = DataType::Size(type);
size_t size_from = DataType::Size(from);
size_t size_to = DataType::Size(to);
- DataType::Type ctype = size_from == size_vec ? from : type;
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1172,7 +1156,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
(size_to >= size_from &&
size_from >= size_vec &&
- VectorizeUse(node, opa, generate_code, ctype, restrictions))) {
+ VectorizeUse(node, opa, generate_code, type, restrictions))) {
if (generate_code) {
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
@@ -1438,10 +1422,10 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
*restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(4);
case DataType::Type::kInt64:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(2);
case DataType::Type::kFloat32:
*restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN)
@@ -1467,10 +1451,10 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
*restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoSAD;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(4);
case DataType::Type::kInt64:
- *restrictions |= kNoDiv | kNoReduction | kNoSAD;
+ *restrictions |= kNoDiv | kNoSAD;
return TrySetVectorLength(2);
case DataType::Type::kFloat32:
*restrictions |= kNoMinMax | kNoReduction; // min/max(x, NaN)
@@ -1578,12 +1562,13 @@ void HLoopOptimization::GenerateVecMem(HInstruction* org,
// Scalar store or load.
DCHECK(vector_mode_ == kSequential);
if (opb != nullptr) {
+ DataType::Type component_type = org->AsArraySet()->GetComponentType();
vector = new (global_allocator_) HArraySet(
- org->InputAt(0), opa, opb, type, org->GetSideEffects(), dex_pc);
+ org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
} else {
bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HArrayGet(
- org->InputAt(0), opa, type, org->GetSideEffects(), dex_pc, is_string_char_at);
+ org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
}
}
vector_map_->Put(org, vector);
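Editor's note: the IsNarrowerOperand(s) hunks above now pair sign extensions with the signed flavor of the packed type and zero extensions with the unsigned flavor (via ToSignedType/ToUnsignedType). A standalone C++ sketch — not ART code — of the distinction being matched:

#include <cassert>
#include <cstdint>

int32_t SignExtend16(int32_t x) { return static_cast<int16_t>(x); }   // Int16 flavor
int32_t ZeroExtend16(int32_t x) { return static_cast<uint16_t>(x); }  // Uint16 flavor

int main() {
  int32_t v = -1;                      // low 16 bits all set
  assert(SignExtend16(v) == -1);       // matches a sign-extended Int16 operand
  assert(ZeroExtend16(v) == 0xFFFF);   // matches a zero-extended Uint16 operand
  return 0;
}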
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 75cdb3ee5e..88609ea790 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5345,6 +5345,13 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
+ void SetType(DataType::Type new_type) {
+ DCHECK(DataType::IsIntegralType(GetType()));
+ DCHECK(DataType::IsIntegralType(new_type));
+ DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
+ SetPackedField<TypeField>(new_type);
+ }
+
DECLARE_INSTRUCTION(InstanceFieldGet);
private:
@@ -5468,6 +5475,13 @@ class HArrayGet FINAL : public HExpression<2> {
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
+ void SetType(DataType::Type new_type) {
+ DCHECK(DataType::IsIntegralType(GetType()));
+ DCHECK(DataType::IsIntegralType(new_type));
+ DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
+ SetPackedField<TypeField>(new_type);
+ }
+
DECLARE_INSTRUCTION(ArrayGet);
private:
@@ -6142,6 +6156,13 @@ class HStaticFieldGet FINAL : public HExpression<1> {
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
+ void SetType(DataType::Type new_type) {
+ DCHECK(DataType::IsIntegralType(GetType()));
+ DCHECK(DataType::IsIntegralType(new_type));
+ DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
+ SetPackedField<TypeField>(new_type);
+ }
+
DECLARE_INSTRUCTION(StaticFieldGet);
private:
diff --git a/compiler/optimizing/nodes_shared.cc b/compiler/optimizing/nodes_shared.cc
index f982523634..2f971b93a6 100644
--- a/compiler/optimizing/nodes_shared.cc
+++ b/compiler/optimizing/nodes_shared.cc
@@ -54,6 +54,9 @@ void HDataProcWithShifterOp::GetOpInfoFromInstruction(HInstruction* instruction,
// default encoding 'LSL 0'.
*op_kind = kLSL;
*shift_amount = 0;
+ } else if (result_type == DataType::Type::kUint8 ||
+ (input_type == DataType::Type::kUint8 && input_size < result_size)) {
+ *op_kind = kUXTB;
} else if (result_type == DataType::Type::kUint16 ||
(input_type == DataType::Type::kUint16 && input_size < result_size)) {
*op_kind = kUXTH;
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 781a59f661..17540b9770 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -136,6 +136,20 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size unsigned type and leaves other types alone.
+ static DataType::Type ToUnsignedType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kBool: // 1-byte storage unit
+ case DataType::Type::kInt8:
+ return DataType::Type::kUint8;
+ case DataType::Type::kInt16:
+ return DataType::Type::kUint16;
+ default:
+ DCHECK(type != DataType::Type::kVoid && type != DataType::Type::kReference) << type;
+ return type;
+ }
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
protected:
@@ -254,6 +268,8 @@ inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type
}
DCHECK(input->IsVecOperation());
DataType::Type input_type = input->AsVecOperation()->GetPackedType();
+ DCHECK_EQ(HVecOperation::ToUnsignedType(input_type) == HVecOperation::ToUnsignedType(type),
+ HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type));
return HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type);
}
@@ -943,8 +959,8 @@ class HVecSADAccumulate FINAL : public HVecOperation {
DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
DCHECK(sad_left->IsVecOperation());
DCHECK(sad_right->IsVecOperation());
- DCHECK_EQ(sad_left->AsVecOperation()->GetPackedType(),
- sad_right->AsVecOperation()->GetPackedType());
+ DCHECK_EQ(ToSignedType(sad_left->AsVecOperation()->GetPackedType()),
+ ToSignedType(sad_right->AsVecOperation()->GetPackedType()));
SetRawInputAt(0, accumulator);
SetRawInputAt(1, sad_left);
SetRawInputAt(2, sad_right);
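Editor's note: a hedged sketch of the same-size signed/unsigned mapping that ToSignedType()/ToUnsignedType() provide in nodes_vector.h, which is what lets the packed-type checks treat Int8/Uint8 and Int16/Uint16 as compatible. This uses a simplified stand-in enum, not ART's DataType::Type, and the kBool mapping here is an assumption of the sketch:

#include <cassert>

enum class Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

Type ToUnsigned(Type t) {
  switch (t) {
    case Type::kBool:
    case Type::kInt8:  return Type::kUint8;
    case Type::kInt16: return Type::kUint16;
    default:           return t;  // wider integral types are left alone
  }
}

Type ToSigned(Type t) {
  switch (t) {
    case Type::kBool:
    case Type::kUint8:  return Type::kInt8;
    case Type::kUint16: return Type::kInt16;
    default:            return t;
  }
}

int main() {
  assert(ToUnsigned(Type::kInt8) == Type::kUint8);
  assert(ToSigned(Type::kUint16) == Type::kInt16);
  // Mirrors the new DCHECK_EQ in HasConsistentPackedTypes: the two maps must agree.
  assert((ToUnsigned(Type::kInt8) == ToUnsigned(Type::kUint8)) ==
         (ToSigned(Type::kInt8) == ToSigned(Type::kUint8)));
  return 0;
}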
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bd65cbf25e..b7380b0a49 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -63,6 +63,7 @@ class OptimizingCFITest : public CFITest {
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
+ code_gen_->InitializeCodeGenerationData();
const int frame_size = 64;
int core_reg = 0;
int fp_reg = 0;
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index fde55cb92f..1e82c4b0f7 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -330,10 +330,10 @@ static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
- 0x08, 0x00, 0x80, 0x14, 0xFC, 0xFF, 0xBD, 0x27,
+ 0x08, 0x00, 0x80, 0x14, 0xF0, 0xFF, 0xBD, 0x27,
0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
- 0x09, 0x00, 0x20, 0x00, 0x04, 0x00, 0xBD, 0x27,
+ 0x09, 0x00, 0x20, 0x00, 0x10, 0x00, 0xBD, 0x27,
};
static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F, 0x34, 0x00, 0xB0, 0x8F,
@@ -342,7 +342,7 @@ static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
};
static constexpr uint8_t expected_cfi_kMips_adjust[] = {
0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
- 0x50, 0x0E, 0x44, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
+ 0x50, 0x0E, 0x50, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu sp, sp, -64
@@ -356,8 +356,8 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000010: sdc1 f22, +40(sp)
// 0x00000014: sdc1 f20, +32(sp)
// 0x00000018: bnez a0, 0x0000003c ; +36
-// 0x0000001c: addiu sp, sp, -4
-// 0x00000020: .cfi_def_cfa_offset: 68
+// 0x0000001c: addiu sp, sp, -16
+// 0x00000020: .cfi_def_cfa_offset: 80
// 0x00000020: sw ra, +0(sp)
// 0x00000024: nal
// 0x00000028: lui at, 2
@@ -365,7 +365,7 @@ static constexpr uint8_t expected_cfi_kMips_adjust[] = {
// 0x00000030: addu at, at, ra
// 0x00000034: lw ra, +0(sp)
// 0x00000038: jr at
-// 0x0000003c: addiu sp, sp, 4
+// 0x0000003c: addiu sp, sp, 16
// 0x00000040: .cfi_def_cfa_offset: 64
// 0x00000040: nop
// ...
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 42f32b7866..29319f8c38 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1142,6 +1142,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
if (kArenaAllocatorCountAllocations) {
+ codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting.
size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
MemStats mem_stats(allocator.GetMemStats());
@@ -1251,18 +1252,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
if (codegen.get() == nullptr) {
return false;
}
-
- if (kArenaAllocatorCountAllocations) {
- size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
- if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(allocator.GetMemStats());
- MemStats peak_stats(arena_stack.GetPeakStats());
- LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
- << dex_file->PrettyMethod(method_idx)
- << "\n" << Dumpable<MemStats>(mem_stats)
- << "\n" << Dumpable<MemStats>(peak_stats);
- }
- }
}
size_t stack_map_size = 0;
@@ -1357,6 +1346,19 @@ bool OptimizingCompiler::JitCompile(Thread* self,
jit_logger->WriteLog(code, code_allocator.GetSize(), method);
}
+ if (kArenaAllocatorCountAllocations) {
+ codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting.
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
+ << dex_file->PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
+ }
+ }
+
return true;
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 5ed9e0243f..1d3fe0334d 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -16,6 +16,7 @@
#include "register_allocation_resolver.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "linear_order.h"
#include "ssa_liveness_analysis.h"
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7eb2188a28..9bc80457a3 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -43,9 +43,12 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
current_entry_.dex_method_index = dex::kDexNoIndex;
current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
- current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
- ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
- : nullptr;
+ current_entry_.dex_register_entry.live_dex_registers_mask = nullptr;
+ if (num_dex_registers != 0u) {
+ current_entry_.dex_register_entry.live_dex_registers_mask =
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+ current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+ }
if (sp_mask != nullptr) {
stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
}
@@ -121,9 +124,12 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
current_inline_info_.dex_pc = dex_pc;
current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
- current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
- ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
- : nullptr;
+ current_inline_info_.dex_register_entry.live_dex_registers_mask = nullptr;
+ if (num_dex_registers != 0) {
+ current_inline_info_.dex_register_entry.live_dex_registers_mask =
+ ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
+ current_inline_info_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+ }
current_dex_register_ = 0;
}
@@ -468,7 +474,7 @@ size_t StackMapStream::AddDexRegisterMapEntry(const DexRegisterMapEntry& entry)
if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
// We don't have a perfect hash functions so we need a list to collect all stack maps
    // We don't have a perfect hash function, so we need a list to collect all stack maps
// which might have the same dex register map.
- ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
stack_map_indices.push_back(current_entry_index);
dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
} else {
@@ -546,7 +552,7 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info,
size_t StackMapStream::PrepareRegisterMasks() {
register_masks_.resize(stack_maps_.size(), 0u);
- ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
const size_t index = dedupe.size();
stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
@@ -558,7 +564,7 @@ size_t StackMapStream::PrepareRegisterMasks() {
void StackMapStream::PrepareMethodIndices() {
CHECK(method_indices_.empty());
method_indices_.resize(stack_maps_.size() + inline_infos_.size());
- ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
const size_t index = dedupe.size();
const uint32_t method_index = stack_map.dex_method_index;
@@ -584,11 +590,11 @@ size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
// For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later
// when copying out from stack_masks_.
- ArenaUnorderedMap<MemoryRegion,
- size_t,
- FNVHash<MemoryRegion>,
- MemoryRegion::ContentEquals> dedup(
- stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
+ ScopedArenaUnorderedMap<MemoryRegion,
+ size_t,
+ FNVHash<MemoryRegion>,
+ MemoryRegion::ContentEquals> dedup(
+ stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
for (StackMapEntry& stack_map : stack_maps_) {
size_t index = dedup.size();
MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 62ed7ee0e5..e126609dba 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
-#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/hash_map.h"
+#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "memory_region.h"
#include "method_info.h"
@@ -60,8 +60,7 @@ class DexRegisterLocationHashFn {
*/
class StackMapStream : public ValueObject {
public:
- explicit StackMapStream(ArenaAllocator* allocator,
- InstructionSet instruction_set)
+ explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
: allocator_(allocator),
instruction_set_(instruction_set),
stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -223,37 +222,37 @@ class StackMapStream : public ValueObject {
size_t dex_register_locations_index) const;
void CheckCodeInfo(MemoryRegion region) const;
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
const InstructionSet instruction_set_;
- ArenaVector<StackMapEntry> stack_maps_;
+ ScopedArenaVector<StackMapEntry> stack_maps_;
// A catalog of unique [location_kind, register_value] pairs (per method).
- ArenaVector<DexRegisterLocation> location_catalog_entries_;
+ ScopedArenaVector<DexRegisterLocation> location_catalog_entries_;
// Map from Dex register location catalog entries to their indices in the
// location catalog.
- using LocationCatalogEntriesIndices = ArenaHashMap<DexRegisterLocation,
- size_t,
- LocationCatalogEntriesIndicesEmptyFn,
- DexRegisterLocationHashFn>;
+ using LocationCatalogEntriesIndices = ScopedArenaHashMap<DexRegisterLocation,
+ size_t,
+ LocationCatalogEntriesIndicesEmptyFn,
+ DexRegisterLocationHashFn>;
LocationCatalogEntriesIndices location_catalog_entries_indices_;
// A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`.
- ArenaVector<size_t> dex_register_locations_;
- ArenaVector<InlineInfoEntry> inline_infos_;
- ArenaVector<uint8_t> stack_masks_;
- ArenaVector<uint32_t> register_masks_;
- ArenaVector<uint32_t> method_indices_;
- ArenaVector<DexRegisterMapEntry> dex_register_entries_;
+ ScopedArenaVector<size_t> dex_register_locations_;
+ ScopedArenaVector<InlineInfoEntry> inline_infos_;
+ ScopedArenaVector<uint8_t> stack_masks_;
+ ScopedArenaVector<uint32_t> register_masks_;
+ ScopedArenaVector<uint32_t> method_indices_;
+ ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
- ArenaSafeMap<uint32_t, ArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
+ ScopedArenaSafeMap<uint32_t, ScopedArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
StackMapEntry current_entry_;
InlineInfoEntry current_inline_info_;
- ArenaVector<uint8_t> code_info_encoding_;
+ ScopedArenaVector<uint8_t> code_info_encoding_;
size_t needed_size_;
uint32_t current_dex_register_;
bool in_inline_frame_;
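Editor's note: the StackMapStream changes above swap ArenaVector/ArenaHashMap and friends for their ScopedArena counterparts, so all of the stream's working memory is released when the enclosing scope unwinds rather than living for the whole compilation. A hedged standalone analogue using std::pmr (not ART's ScopedArenaAllocator):

#include <cstdint>
#include <memory_resource>
#include <vector>

int main() {
  // Backing region; everything allocated from it is reclaimed when it is destroyed.
  std::pmr::monotonic_buffer_resource arena(4096);
  {
    std::pmr::vector<std::uint32_t> stack_map_indices(&arena);
    stack_map_indices.push_back(0u);
    stack_map_indices.push_back(1u);
  }  // the vector is gone; its memory goes back with the arena, not the global heap
  return 0;
}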
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 96ac368ac3..91f86d5c50 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,7 +47,8 @@ using Kind = DexRegisterLocation::Kind;
TEST(StackMapTest, Test1) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -128,7 +129,8 @@ TEST(StackMapTest, Test1) {
TEST(StackMapTest, Test2) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -412,7 +414,8 @@ TEST(StackMapTest, Test2) {
TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -506,7 +509,8 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -585,7 +589,8 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
// not treat it as kNoDexRegisterMap.
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -648,7 +653,8 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -706,7 +712,8 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, false);
@@ -755,7 +762,8 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArtMethod art_method;
@@ -936,7 +944,8 @@ TEST(StackMapTest, CodeOffsetTest) {
TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, true);
@@ -964,7 +973,8 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
TEST(StackMapTest, TestInvokeInfo) {
ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaStack arena_stack(&pool);
+ ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
ArenaBitVector sp_mask(&allocator, 0, true);
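Every one of these tests applies the same allocator change, so a minimal sketch of the new setup (using only names that already appear in the hunks above; nothing else is assumed) is:

    ArenaPool pool;
    ArenaStack arena_stack(&pool);                  // backing memory, organized as a stack of arenas
    ScopedArenaAllocator allocator(&arena_stack);   // frees its allocations when it goes out of scope
    StackMapStream stream(&allocator, kRuntimeISA);

Presumably the point of moving StackMapStream and its containers onto ScopedArenaAllocator/ScopedArenaVector is that the stack map buffers can be released as soon as the enclosing scope ends, rather than living as long as the old ArenaAllocator.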
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index ae7636b106..ad84412ef5 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -650,6 +650,24 @@ class AssemblerTest : public testing::Test {
}
template <typename ImmType>
+ std::string RepeatRVIb(void (Ass::*f)(Reg, VecReg, ImmType),
+ int imm_bits,
+ const std::string& fmt,
+ int bias = 0,
+ int multiplier = 1) {
+ return RepeatTemplatedRegistersImmBits<Reg, VecReg, ImmType>(
+ f,
+ imm_bits,
+ GetRegisters(),
+ GetVectorRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetVecRegName,
+ fmt,
+ bias,
+ multiplier);
+ }
+
+ template <typename ImmType>
std::string RepeatVVIb(void (Ass::*f)(VecReg, VecReg, ImmType),
int imm_bits,
const std::string& fmt,
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index b83e3f5471..9545ca6869 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -1863,20 +1863,20 @@ void MipsAssembler::Not(Register rd, Register rs) {
}
void MipsAssembler::Push(Register rs) {
- IncreaseFrameSize(kMipsWordSize);
+ IncreaseFrameSize(kStackAlignment);
Sw(rs, SP, 0);
}
void MipsAssembler::Pop(Register rd) {
Lw(rd, SP, 0);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
}
void MipsAssembler::PopAndReturn(Register rd, Register rt) {
bool reordering = SetReorder(false);
Lw(rd, SP, 0);
Jr(rt);
- DecreaseFrameSize(kMipsWordSize); // Single instruction in delay slot.
+ DecreaseFrameSize(kStackAlignment); // Single instruction in delay slot.
SetReorder(reordering);
}
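The switch from kMipsWordSize to kStackAlignment in these emergency RA spills keeps $sp aligned across the spill. A rough sketch of the constants involved, assuming ART's usual values rather than anything stated in this patch:

    static_assert(kMipsWordSize == 4, "only the low word at 0($sp) is actually written and read");
    static_assert(kStackAlignment == 16, "Push()/Pop() now move $sp by a full aligned slot");

The updated LongBranchReorder expectations further down ("addiu $sp, $sp, -16") reflect the same change.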
@@ -2800,6 +2800,74 @@ void MipsAssembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
static_cast<FRegister>(ws));
}
+void MipsAssembler::Copy_sB(Register rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrRf(EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_sH(Register rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrRf(EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_sW(Register rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ DsFsmInstrRf(EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_uB(Register rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrRf(EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::Copy_uH(Register rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrRf(EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19),
+ rd,
+ static_cast<FRegister>(ws));
+}
+
+void MipsAssembler::InsertB(VectorRegister wd, Register rs, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ DsFsmInstrFffr(EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::InsertH(VectorRegister wd, Register rs, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ DsFsmInstrFffr(
+ EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
+void MipsAssembler::InsertW(VectorRegister wd, Register rs, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ DsFsmInstrFffr(EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(wd),
+ rs);
+}
+
void MipsAssembler::FillB(VectorRegister wd, Register rs) {
CHECK(HasMsa());
DsFsmInstrFr(EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e),
@@ -2921,6 +2989,38 @@ void MipsAssembler::StD(VectorRegister wd, Register rs, int offset) {
rs);
}
+void MipsAssembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
DsFsmInstrFff(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14),
@@ -2953,6 +3053,70 @@ void MipsAssembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister w
static_cast<FRegister>(wt));
}
+void MipsAssembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
DsFsmInstrFff(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12),
@@ -3049,6 +3213,54 @@ void MipsAssembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister
static_cast<FRegister>(wt));
}
+void MipsAssembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
+void MipsAssembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ DsFsmInstrFff(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15),
+ static_cast<FRegister>(wd),
+ static_cast<FRegister>(ws),
+ static_cast<FRegister>(wt));
+}
+
void MipsAssembler::ReplicateFPToVectorRegister(VectorRegister dst,
FRegister src,
bool is_double) {
@@ -3353,8 +3565,6 @@ MipsAssembler::Branch::Branch(bool is_r6,
CHECK_NE(dest_reg, ZERO);
if (is_r6) {
CHECK_EQ(base_reg, ZERO);
- } else {
- CHECK_NE(base_reg, ZERO);
}
InitializeType(label_or_literal_type, is_r6);
}
@@ -3646,15 +3856,29 @@ uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch
case Branch::kFarLabel:
case Branch::kLiteral:
case Branch::kFarLiteral:
- return GetLabelLocation(&pc_rel_base_label_);
+ if (branch->GetRightRegister() != ZERO) {
+ return GetLabelLocation(&pc_rel_base_label_);
+ }
+ // For those label/literal loads which come with their own NAL instruction
+ // and don't depend on `pc_rel_base_label_`, we can simply use the location
+ // of the "branch" (the NAL immediately precedes the "branch"). The location
+ // is close enough for the user of the returned location, PromoteIfNeeded(),
+ // to not miss a needed promotion to a far load.
+ // (GetOffsetSizeNeeded() provides a little leeway by means of kMaxBranchSize,
+ // which is larger than all composite branches and label/literal loads: it's
+ // OK to promote a bit earlier than strictly necessary; it makes things
+ // simpler.)
+ FALLTHROUGH_INTENDED;
default:
return branch->GetLocation();
}
}
uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t location, uint32_t max_short_distance) {
- // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or
- // `this->GetLocation()` for everything else.
+ // `location` comes from GetBranchLocationOrPcRelBase() and is either the location
+ // of the PC-relative branch or (for some R2 label and literal loads) the location
+ // of `pc_rel_base_label_`. The PC-relative offset of the branch/load is relative
+ // to this location.
// If the branch is still unresolved or already long, nothing to do.
if (IsLong() || !IsResolved()) {
return 0;
@@ -3695,7 +3919,15 @@ uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Bra
case Branch::kFarLabel:
case Branch::kLiteral:
case Branch::kFarLiteral:
- return GetLabelLocation(&pc_rel_base_label_);
+ if (branch->GetRightRegister() == ZERO) {
+ // These loads don't use `pc_rel_base_label_` and instead rely on their own
+ // NAL instruction (it immediately precedes the "branch"). Therefore the
+ // effective PC-relative base register is RA and it corresponds to the 2nd
+ // instruction after the NAL.
+ return branch->GetLocation() + sizeof(uint32_t);
+ } else {
+ return GetLabelLocation(&pc_rel_base_label_);
+ }
default:
return branch->GetOffsetLocation() +
Branch::branch_info_[branch->GetType()].pc_org * sizeof(uint32_t);
@@ -3703,9 +3935,10 @@ uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Bra
}
uint32_t MipsAssembler::Branch::GetOffset(uint32_t location) const {
- // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or
- // `this->GetOffsetLocation() + branch_info_[this->GetType()].pc_org * sizeof(uint32_t)`
- // for everything else.
+ // `location` comes from GetBranchOrPcRelBaseForEncoding() and is either a location
+ // within/near the PC-relative branch or (for some R2 label and literal loads) the
+ // location of `pc_rel_base_label_`. The PC-relative offset of the branch/load is
+ // relative to this location.
CHECK(IsResolved());
uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
// Calculate the byte distance between instructions and also account for
@@ -4001,6 +4234,12 @@ void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
void MipsAssembler::LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label) {
// Label address loads are treated as pseudo branches since they require very similar handling.
DCHECK(!label->IsBound());
+ // If `pc_rel_base_label_` isn't bound or none of the registers contains its address, we
+ // may generate an individual NAL instruction to simulate PC-relative addressing on R2
+ // by specifying a `base_reg` of `ZERO`. Check for that case here.
+ if (base_reg == ZERO && !IsR6()) {
+ Nal();
+ }
branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLabel);
FinalizeLabeledBranch(label);
}
@@ -4016,6 +4255,12 @@ void MipsAssembler::LoadLiteral(Register dest_reg, Register base_reg, Literal* l
DCHECK_EQ(literal->GetSize(), 4u);
MipsLabel* label = literal->GetLabel();
DCHECK(!label->IsBound());
+ // If `pc_rel_base_label_` isn't bound or none of the registers contains its address, we
+ // may generate an individual NAL instruction to simulate PC-relative addressing on R2
+ // by specifying a `base_reg` of `ZERO`. Check for that case here.
+ if (base_reg == ZERO && !IsR6()) {
+ Nal();
+ }
branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLiteral);
FinalizeLabeledBranch(label);
}
@@ -4203,6 +4448,13 @@ static inline bool IsAbsorbableInstruction(uint32_t instruction) {
}
}
+static inline Register GetR2PcRelBaseRegister(Register reg) {
+ // LoadLabelAddress() and LoadLiteral() generate individual NAL
+ // instructions on R2 when the specified base register is ZERO
+ // and so the effective PC-relative base register is RA, not ZERO.
+ return (reg == ZERO) ? RA : reg;
+}
+
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
void MipsAssembler::EmitBranch(uint32_t branch_id) {
CHECK_EQ(overwriting_, true);
@@ -4293,13 +4545,13 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
case Branch::kLabel:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
- Addiu(lhs, rhs, offset);
+ Addiu(lhs, GetR2PcRelBaseRegister(rhs), offset);
break;
// R2 near literal.
case Branch::kLiteral:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
- Lw(lhs, rhs, offset);
+ Lw(lhs, GetR2PcRelBaseRegister(rhs), offset);
break;
// R2 long branches.
@@ -4336,7 +4588,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
Addu(AT, AT, RA);
Lw(RA, SP, 0);
Jr(AT);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
break;
case Branch::kLongCondBranch:
// The comment on case 'Branch::kLongUncondBranch' applies here as well.
@@ -4356,7 +4608,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
Addu(AT, AT, RA);
Lw(RA, SP, 0);
Jr(AT);
- DecreaseFrameSize(kMipsWordSize);
+ DecreaseFrameSize(kStackAlignment);
break;
case Branch::kLongCall:
DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
@@ -4378,7 +4630,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lui(AT, High16Bits(offset));
Ori(AT, AT, Low16Bits(offset));
- Addu(lhs, AT, rhs);
+ Addu(lhs, AT, GetR2PcRelBaseRegister(rhs));
break;
// R2 far literal.
case Branch::kFarLiteral:
@@ -4386,7 +4638,7 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
offset += (offset & 0x8000) << 1; // Account for sign extension in lw.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Lui(AT, High16Bits(offset));
- Addu(AT, AT, rhs);
+ Addu(AT, AT, GetR2PcRelBaseRegister(rhs));
Lw(lhs, AT, Low16Bits(offset));
break;
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 57b3edd03a..c0ea29fbd7 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -601,6 +601,14 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void Copy_sB(Register rd, VectorRegister ws, int n4);
+ void Copy_sH(Register rd, VectorRegister ws, int n3);
+ void Copy_sW(Register rd, VectorRegister ws, int n2);
+ void Copy_uB(Register rd, VectorRegister ws, int n4);
+ void Copy_uH(Register rd, VectorRegister ws, int n3);
+ void InsertB(VectorRegister wd, Register rs, int n4);
+ void InsertH(VectorRegister wd, Register rs, int n3);
+ void InsertW(VectorRegister wd, Register rs, int n2);
void FillB(VectorRegister wd, Register rs);
void FillH(VectorRegister wd, Register rs);
void FillW(VectorRegister wd, Register rs);
@@ -618,10 +626,22 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void StW(VectorRegister wd, Register rs, int offset);
void StD(VectorRegister wd, Register rs, int offset);
+ void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
@@ -636,6 +656,13 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FRegister src, bool is_double);
@@ -1061,16 +1088,36 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
}
- // Load label address using the base register (for R2 only) or using PC-relative loads
- // (for R6 only; base_reg must be ZERO). To be used with data labels in the literal /
- // jump table area only and not with regular code labels.
+ // Load label address using PC-relative addressing.
+ // To be used with data labels in the literal / jump table area only and not
+ // with regular code labels.
+ //
+ // For R6 base_reg must be ZERO.
+ //
+ // On R2 there are two possible uses w.r.t. base_reg:
+ //
+ // - base_reg = ZERO:
+ // The NAL instruction will be generated as part of the load and it will
+ // clobber the RA register.
+ //
+ // - base_reg != ZERO:
+ // The RA-clobbering NAL instruction won't be generated as part of the load.
+ // The label pc_rel_base_label_ must be bound (with BindPcRelBaseLabel())
+ // and base_reg must hold the address of the label. Example:
+ // __ Nal();
+ // __ Move(S3, RA);
+ // __ BindPcRelBaseLabel(); // S3 holds the address of pc_rel_base_label_.
+ // __ LoadLabelAddress(A0, S3, label1);
+ // __ LoadLabelAddress(A1, S3, label2);
+ // __ LoadLiteral(V0, S3, literal1);
+ // __ LoadLiteral(V1, S3, literal2);
void LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label);
// Create a new literal with the given data.
Literal* NewLiteral(size_t size, const uint8_t* data);
- // Load literal using the base register (for R2 only) or using PC-relative loads
- // (for R6 only; base_reg must be ZERO).
+ // Load literal using PC-relative addressing.
+ // See the above comments for LoadLabelAddress() on the value of base_reg.
void LoadLiteral(Register dest_reg, Register base_reg, Literal* literal);
// Create a jump table for the given labels that will be emitted when finalizing.
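With the base_reg == ZERO form on R2 the assembler now emits the NAL itself, so nothing is needed besides the load; a small sketch matching the expansions exercised by the new *UsingNal tests below (near cases shown):

    __ LoadLabelAddress(A0, ZERO, &label);   // expands to: nal; addiu $a0, $ra, <label offset>
    __ LoadLiteral(V0, ZERO, literal);       // expands to: nal; lw $v0, <literal offset>($ra)

Both forms still get promoted to the longer lui-based sequences through $at when the target ends up outside the 16-bit offset range.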
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index b12b6b651c..c76a568ddd 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -2185,6 +2185,46 @@ TEST_F(AssemblerMIPS32r6Test, SplatiD) {
"splati.d");
}
+TEST_F(AssemblerMIPS32r6Test, Copy_sB) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_sH) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_sW) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_uB) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Copy_uH) {
+ DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertB) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
+ "insert.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertH) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
+ "insert.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, InsertW) {
+ DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
+ "insert.w");
+}
+
TEST_F(AssemblerMIPS32r6Test, FillB) {
DriverStr(RepeatVR(&mips::MipsAssembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
}
@@ -2251,6 +2291,22 @@ TEST_F(AssemblerMIPS32r6Test, StD) {
"st.d");
}
+TEST_F(AssemblerMIPS32r6Test, IlvlB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"), "ilvl.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"), "ilvl.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"), "ilvl.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvlD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"), "ilvl.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, IlvrB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"), "ilvr.b");
}
@@ -2267,6 +2323,46 @@ TEST_F(AssemblerMIPS32r6Test, IlvrD) {
DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"), "ilvr.d");
}
+TEST_F(AssemblerMIPS32r6Test, IlvevB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvevD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodB) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, IlvodD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, MaddvB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
"maddv.b");
@@ -2287,6 +2383,36 @@ TEST_F(AssemblerMIPS32r6Test, MaddvD) {
"maddv.d");
}
+TEST_F(AssemblerMIPS32r6Test, Hadd_sH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_sW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_sD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.d");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uH) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uW) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, Hadd_uD) {
+ DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.d");
+}
+
TEST_F(AssemblerMIPS32r6Test, MsubvB) {
DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
"msubv.b");
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index eed83a5528..b027d3a549 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2803,7 +2803,7 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
oss <<
".set noreorder\n"
"addiu $t0, $t1, 0x5678\n"
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $ra, 0($sp)\n"
"bltzal $zero, .+4\n"
"lui $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
@@ -2811,11 +2811,11 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
"addu $at, $at, $ra\n"
"lw $ra, 0($sp)\n"
"jalr $zero, $at\n"
- "addiu $sp, $sp, 4\n" <<
+ "addiu $sp, $sp, 16\n" <<
RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
"addiu $t0, $t1, 0x5678\n"
- "addiu $sp, $sp, -4\n"
+ "addiu $sp, $sp, -16\n"
"sw $ra, 0($sp)\n"
"bltzal $zero, .+4\n"
"lui $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
@@ -2823,7 +2823,7 @@ TEST_F(AssemblerMIPSTest, LongBranchReorder) {
"addu $at, $at, $ra\n"
"lw $ra, 0($sp)\n"
"jalr $zero, $at\n"
- "addiu $sp, $sp, 4\n";
+ "addiu $sp, $sp, 16\n";
std::string expected = oss.str();
DriverStr(expected, "LongBranchReorder");
EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 0 * 4u);
@@ -2913,6 +2913,46 @@ TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddress) {
DriverStr(expected, "LoadNearestFarLabelAddress");
}
+TEST_F(AssemblerMIPSTest, LoadFarthestNearLabelAddressUsingNal) {
+ mips::MipsLabel label;
+ __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
+ constexpr size_t kAddiuCount = 0x1FDE;
+ for (size_t i = 0; i != kAddiuCount; ++i) {
+ __ Addiu(mips::A0, mips::A1, 0);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltzal $zero, .+4\n"
+ "addiu $v0, $ra, %lo(2f - 1f)\n"
+ "1:\n" +
+ RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
+ "2:\n";
+ DriverStr(expected, "LoadFarthestNearLabelAddressUsingNal");
+}
+
+TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddressUsingNal) {
+ mips::MipsLabel label;
+ __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
+ constexpr size_t kAdduCount = 0x1FDF;
+ for (size_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltzal $zero, .+4\n"
+ "lui $at, %hi(2f - 1f)\n"
+ "1:\n"
+ "ori $at, $at, %lo(2f - 1b)\n"
+ "addu $v0, $at, $ra\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n";
+ DriverStr(expected, "LoadNearestFarLabelAddressUsingNal");
+}
+
TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteral) {
mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
__ BindPcRelBaseLabel();
@@ -2951,6 +2991,46 @@ TEST_F(AssemblerMIPSTest, LoadNearestFarLiteral) {
DriverStr(expected, "LoadNearestFarLiteral");
}
+TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteralUsingNal) {
+ mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips::V0, mips::ZERO, literal);
+ constexpr size_t kAddiuCount = 0x1FDE;
+ for (size_t i = 0; i != kAddiuCount; ++i) {
+ __ Addiu(mips::A0, mips::A1, 0);
+ }
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltzal $zero, .+4\n"
+ "lw $v0, %lo(2f - 1f)($ra)\n"
+ "1:\n" +
+ RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadFarthestNearLiteralUsingNal");
+}
+
+TEST_F(AssemblerMIPSTest, LoadNearestFarLiteralUsingNal) {
+ mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips::V0, mips::ZERO, literal);
+ constexpr size_t kAdduCount = 0x1FDF;
+ for (size_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltzal $zero, .+4\n"
+ "lui $at, %hi(2f - 1f)\n"
+ "1:\n"
+ "addu $at, $at, $ra\n"
+ "lw $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadNearestFarLiteralUsingNal");
+}
+
#undef __
} // namespace art
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 606d4c39d0..d8a4531ac2 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1874,6 +1874,72 @@ void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
}
+void Mips64Assembler::Copy_sB(GpuRegister rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sH(GpuRegister rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sW(GpuRegister rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_sD(GpuRegister rd, VectorRegister ws, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ EmitMsaELM(0x2, n1 | kMsaDfNDoublewordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uB(GpuRegister rd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uH(GpuRegister rd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::Copy_uW(GpuRegister rd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x3, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
+}
+
+void Mips64Assembler::InsertB(VectorRegister wd, GpuRegister rs, int n4) {
+ CHECK(HasMsa());
+ CHECK(IsUint<4>(n4)) << n4;
+ EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertH(VectorRegister wd, GpuRegister rs, int n3) {
+ CHECK(HasMsa());
+ CHECK(IsUint<3>(n3)) << n3;
+ EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertW(VectorRegister wd, GpuRegister rs, int n2) {
+ CHECK(HasMsa());
+ CHECK(IsUint<2>(n2)) << n2;
+ EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
+void Mips64Assembler::InsertD(VectorRegister wd, GpuRegister rs, int n1) {
+ CHECK(HasMsa());
+ CHECK(IsUint<1>(n1)) << n1;
+ EmitMsaELM(0x4, n1 | kMsaDfNDoublewordMask, static_cast<VectorRegister>(rs), wd, 0x19);
+}
+
void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
CHECK(HasMsa());
EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
@@ -1972,6 +2038,26 @@ void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
}
+void Mips64Assembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14);
+}
+
void Mips64Assembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14);
@@ -1992,6 +2078,46 @@ void Mips64Assembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister
EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
}
+void Mips64Assembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14);
+}
+
+void Mips64Assembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14);
+}
+
void Mips64Assembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
CHECK(HasMsa());
EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12);
@@ -2052,6 +2178,36 @@ void Mips64Assembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegiste
EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b);
}
+void Mips64Assembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15);
+}
+
+void Mips64Assembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
+ EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15);
+}
+
void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
FpuRegister src,
bool is_double) {
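The hadd_s/hadd_u family pairs adjacent narrower elements and produces double-width sums, which is why only the H, W and D destination formats exist. A hypothetical use with both sources set to the same register, so each result lane is the sum of two adjacent input lanes; the W0-W3 names are only illustrative and not taken from this patch:

    __ Hadd_uH(W1, W0, W0);   // 16 x u8 lanes in W0 -> 8 x u16 pairwise sums
    __ Hadd_uW(W2, W1, W1);   //  8 x u16            -> 4 x u32 pairwise sums
    __ Hadd_uD(W3, W2, W2);   //  4 x u32            -> 2 x u64 pairwise sums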
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index a3787ac6ae..d67fb0054d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -785,6 +785,17 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
+ void Copy_sB(GpuRegister rd, VectorRegister ws, int n4);
+ void Copy_sH(GpuRegister rd, VectorRegister ws, int n3);
+ void Copy_sW(GpuRegister rd, VectorRegister ws, int n2);
+ void Copy_sD(GpuRegister rd, VectorRegister ws, int n1);
+ void Copy_uB(GpuRegister rd, VectorRegister ws, int n4);
+ void Copy_uH(GpuRegister rd, VectorRegister ws, int n3);
+ void Copy_uW(GpuRegister rd, VectorRegister ws, int n2);
+ void InsertB(VectorRegister wd, GpuRegister rs, int n4);
+ void InsertH(VectorRegister wd, GpuRegister rs, int n3);
+ void InsertW(VectorRegister wd, GpuRegister rs, int n2);
+ void InsertD(VectorRegister wd, GpuRegister rs, int n1);
void FillB(VectorRegister wd, GpuRegister rs);
void FillH(VectorRegister wd, GpuRegister rs);
void FillW(VectorRegister wd, GpuRegister rs);
@@ -803,10 +814,22 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void StW(VectorRegister wd, GpuRegister rs, int offset);
void StD(VectorRegister wd, GpuRegister rs, int offset);
+ void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
@@ -821,6 +844,13 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+ void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
+
// Helper for replicating floating point value in all destination elements.
void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index bf0326de87..164af7891c 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -3399,6 +3399,61 @@ TEST_F(AssemblerMIPS64Test, SplatiD) {
"splati.d");
}
+TEST_F(AssemblerMIPS64Test, Copy_sB) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sH) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sW) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_sD) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sD, 1, "copy_s.d ${reg1}, ${reg2}[{imm}]"),
+ "copy_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uB) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.b");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uH) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Copy_uW) {
+ DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uW, 2, "copy_u.w ${reg1}, ${reg2}[{imm}]"),
+ "copy_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertB) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
+ "insert.b");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertH) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
+ "insert.h");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertW) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
+ "insert.w");
+}
+
+TEST_F(AssemblerMIPS64Test, InsertD) {
+ DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertD, 1, "insert.d ${reg1}[{imm}], ${reg2}"),
+ "insert.d");
+}
+
TEST_F(AssemblerMIPS64Test, FillB) {
DriverStr(RepeatVR(&mips64::Mips64Assembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
}
@@ -3469,6 +3524,26 @@ TEST_F(AssemblerMIPS64Test, StD) {
"st.d");
}
+TEST_F(AssemblerMIPS64Test, IlvlB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvlD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvl.d");
+}
+
TEST_F(AssemblerMIPS64Test, IlvrB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"),
"ilvr.b");
@@ -3489,6 +3564,46 @@ TEST_F(AssemblerMIPS64Test, IlvrD) {
"ilvr.d");
}
+TEST_F(AssemblerMIPS64Test, IlvevB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvevD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvev.d");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodB) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.b");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.h");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.w");
+}
+
+TEST_F(AssemblerMIPS64Test, IlvodD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
+ "ilvod.d");
+}
+
TEST_F(AssemblerMIPS64Test, MaddvB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
"maddv.b");
@@ -3509,6 +3624,36 @@ TEST_F(AssemblerMIPS64Test, MaddvD) {
"maddv.d");
}
+TEST_F(AssemblerMIPS64Test, Hadd_sH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_sW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_sD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uH) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.h");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uW) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.w");
+}
+
+TEST_F(AssemblerMIPS64Test, Hadd_uD) {
+ DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
+ "hadd_u.d");
+}
+
TEST_F(AssemblerMIPS64Test, MsubvB) {
DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
"msubv.b");
diff --git a/dalvikvm/Android.bp b/dalvikvm/Android.bp
index cca9ac4dbe..c1944fbe59 100644
--- a/dalvikvm/Android.bp
+++ b/dalvikvm/Android.bp
@@ -34,9 +34,8 @@ art_cc_binary {
shared_libs: [
"liblog",
],
- ldflags: ["-Wl,--export-dynamic"],
},
- linux_glibc: {
+ linux: {
ldflags: ["-Wl,--export-dynamic"],
},
},
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 528cf3a0a7..affe639f8d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -84,7 +84,6 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "os.h"
@@ -449,6 +448,9 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" --dirty-image-objects=<directory-path>: list of known dirty objects in the image.");
UsageError(" The image writer will group them together.");
UsageError("");
+ UsageError(" --compact-dex-level=none|fast: None avoids generating compact dex, fast");
+ UsageError(" generates compact dex with low compile time.");
+ UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
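The flag defaults to none (see the compact_dex_level_ member initializer later in this file's diff), so compact dex generation stays opt-in; the GenerateCompactDex test added below enables it simply by appending --compact-dex-level=fast to an otherwise ordinary dex2oat invocation.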
@@ -1164,6 +1166,7 @@ class Dex2Oat FINAL {
std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+ AssignIfExists(args, M::CompactDexLevel, &compact_dex_level_);
AssignIfExists(args, M::DexFiles, &dex_filenames_);
AssignIfExists(args, M::DexLocations, &dex_locations_);
AssignIfExists(args, M::OatFiles, &oat_filenames_);
@@ -1809,7 +1812,13 @@ class Dex2Oat FINAL {
// if the boot image has changed. How exactly we'll know is under
// experimentation.
TimingLogger::ScopedTiming time_unquicken("Unquicken", timings_);
- VdexFile::Unquicken(dex_files_, input_vdex_file_->GetQuickeningInfo());
+
+ // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
+ // optimization does not depend on the boot image (the optimization relies on not
+ // having final fields in a class, which does not change for an app).
+ VdexFile::Unquicken(dex_files_,
+ input_vdex_file_->GetQuickeningInfo(),
+ /* decompile_return_instruction */ false);
} else {
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
@@ -2221,8 +2230,12 @@ class Dex2Oat FINAL {
return UseProfile();
}
+ bool DoGenerateCompactDex() const {
+ return compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone;
+ }
+
bool DoDexLayoutOptimizations() const {
- return DoProfileGuidedOptimizations();
+ return DoProfileGuidedOptimizations() || DoGenerateCompactDex();
}
bool DoOatLayoutOptimizations() const {
@@ -2445,7 +2458,8 @@ class Dex2Oat FINAL {
oat_writers_.emplace_back(new linker::OatWriter(
IsBootImage(),
timings_,
- do_oat_writer_layout ? profile_compilation_info_.get() : nullptr));
+ do_oat_writer_layout ? profile_compilation_info_.get() : nullptr,
+ compact_dex_level_));
}
}
@@ -2809,6 +2823,7 @@ class Dex2Oat FINAL {
// Dex files we are compiling, does not include the class path dex files.
std::vector<const DexFile*> dex_files_;
std::string no_inline_from_string_;
+ CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
std::vector<std::unique_ptr<linker::ElfWriter>> elf_writers_;
std::vector<std::unique_ptr<linker::OatWriter>> oat_writers_;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index ae7ebe2da1..0a5068112f 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -65,6 +65,7 @@ class Dex2oatImageTest : public CommonRuntimeTest {
std::string error_msg;
CHECK(DexFileLoader::Open(dex.c_str(),
dex,
+ /*verify*/ true,
/*verify_checksum*/ false,
&error_msg,
&dex_files))
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index 43e6c4d02f..3606c618d6 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -239,6 +239,11 @@ static Parser CreateArgumentParser() {
.Define("--class-loader-context=_")
.WithType<std::string>()
.IntoKey(M::ClassLoaderContext)
+ .Define("--compact-dex-level=_")
+ .WithType<CompactDexLevel>()
+ .WithValueMap({{"none", CompactDexLevel::kCompactDexLevelNone},
+ {"fast", CompactDexLevel::kCompactDexLevelFast}})
+ .IntoKey(M::CompactDexLevel)
.Define("--runtime-arg _")
.WithType<std::vector<std::string>>().AppendValues()
.IntoKey(M::RuntimeOptions);
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index 83a3035ed5..9362a3df6f 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -34,6 +34,7 @@
//
// Parse-able keys from the command line.
+DEX2OAT_OPTIONS_KEY (CompactDexLevel, CompactDexLevel)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexFiles)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexLocations)
DEX2OAT_OPTIONS_KEY (int, ZipFd)
diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h
index a4c718625f..f8198ee08b 100644
--- a/dex2oat/dex2oat_options.h
+++ b/dex2oat/dex2oat_options.h
@@ -22,6 +22,7 @@
#include <vector>
#include "base/variant_map.h"
+#include "cdex/compact_dex_level.h"
#include "cmdline_types.h" // TODO: don't need to include this file here
#include "compiler.h"
#include "driver/compiler_options_map.h"
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 1b731fc7f6..cb91978680 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -678,7 +678,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location,
@@ -812,7 +813,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(location, location, true, &error_msg, &dex_files));
+ ASSERT_TRUE(DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -823,13 +825,13 @@ class Dex2oatLayoutTest : public Dex2oatTest {
ASSERT_LT(class_def_count, std::numeric_limits<uint16_t>::max());
ASSERT_GE(class_def_count, 2U);
- // The new layout swaps the classes at indexes 0 and 1.
+ // Make sure the indexes stay the same.
std::string old_class0 = old_dex_file->PrettyType(old_dex_file->GetClassDef(0).class_idx_);
std::string old_class1 = old_dex_file->PrettyType(old_dex_file->GetClassDef(1).class_idx_);
std::string new_class0 = new_dex_file->PrettyType(new_dex_file->GetClassDef(0).class_idx_);
std::string new_class1 = new_dex_file->PrettyType(new_dex_file->GetClassDef(1).class_idx_);
- EXPECT_EQ(old_class0, new_class1);
- EXPECT_EQ(old_class1, new_class0);
+ EXPECT_EQ(old_class0, new_class0);
+ EXPECT_EQ(old_class1, new_class1);
}
EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile);
@@ -966,6 +968,7 @@ class Dex2oatWatchdogTest : public Dex2oatTest {
std::string swap_location = GetOdexDir() + "/Dex2OatSwapTest.odex.swap";
copy.push_back("--swap-file=" + swap_location);
+ copy.push_back("-j512"); // Excessive idle threads just slow down dex2oat.
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kSpeed,
@@ -1372,9 +1375,19 @@ TEST_F(Dex2oatTest, LayoutSections) {
EXPECT_LT(code_item_offset - section_startup_only.offset_, section_startup_only.size_);
++startup_count;
} else {
- // If no flags are set, the method should be unused.
- EXPECT_LT(code_item_offset - section_unused.offset_, section_unused.size_);
- ++unused_count;
+ if (code_item_offset - section_unused.offset_ < section_unused.size_) {
+ // If no flags are set, the method should be unused ...
+ ++unused_count;
+ } else {
+ // or this method is part of the last code item and its end is 4-byte aligned.
+ ClassDataItemIterator it2(*dex_file, dex_file->GetClassData(*class_def));
+ it2.SkipAllFields();
+ for (; it2.HasNextDirectMethod() || it2.HasNextVirtualMethod(); it2.Next()) {
+ EXPECT_LE(it2.GetMethodCodeItemOffset(), code_item_offset);
+ }
+ uint32_t code_item_size = dex_file->FindCodeItemOffset(*class_def, method_idx);
+ EXPECT_EQ((code_item_offset + code_item_size) % 4, 0u);
+ }
}
}
DCHECK(!it.HasNext());
@@ -1385,4 +1398,87 @@ TEST_F(Dex2oatTest, LayoutSections) {
}
}
+// Test that generating compact dex works.
+TEST_F(Dex2oatTest, GenerateCompactDex) {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ // Generate a compact dex based odex.
+ const std::string dir = GetScratchDir();
+ const std::string oat_filename = dir + "/base.oat";
+ const std::string vdex_filename = dir + "/base.vdex";
+ std::string error_msg;
+ const int res = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ oat_filename,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--compact-dex-level=fast"});
+ EXPECT_EQ(res, 0);
+ // Open our generated oat file.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_filename.c_str(),
+ oat_filename.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex->GetLocation().c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file != nullptr);
+ std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
+ ASSERT_EQ(oat_dex_files.size(), 1u);
+ // Check that each dex is a compact dex.
+ for (const OatDexFile* oat_dex : oat_dex_files) {
+ std::unique_ptr<const DexFile> dex_file(oat_dex->OpenDexFile(&error_msg));
+ ASSERT_TRUE(dex_file != nullptr) << error_msg;
+ ASSERT_TRUE(dex_file->IsCompactDexFile());
+ }
+}
+
+class Dex2oatVerifierAbort : public Dex2oatTest {};
+
+TEST_F(Dex2oatVerifierAbort, HardFail) {
+ // Use VerifierDeps as it has hard-failing classes.
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("VerifierDeps"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ std::string error_msg;
+ const int res_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--abort-on-hard-verifier-error"});
+ EXPECT_NE(0, res_fail);
+
+ const int res_no_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--no-abort-on-hard-verifier-error"});
+ EXPECT_EQ(0, res_no_fail);
+}
+
+TEST_F(Dex2oatVerifierAbort, SoftFail) {
+ // Use VerifierDepsMulti as it has soft-failing classes.
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("VerifierDepsMulti"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ std::string error_msg;
+ const int res_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--abort-on-soft-verifier-error"});
+ EXPECT_NE(0, res_fail);
+
+ const int res_no_fail = GenerateOdexForTestWithStatus(
+ {dex->GetLocation()},
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ &error_msg,
+ {"--no-abort-on-soft-verifier-error"});
+ EXPECT_EQ(0, res_no_fail);
+}
+
} // namespace art
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 492c76bc54..dc570da832 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -247,7 +247,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
elf_writers.back()->Start();
oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true,
&timings,
- /*profile_compilation_info*/nullptr));
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone));
}
std::vector<OutputStream*> rodata;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 0fd24a8a8f..fd3feb7a59 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -330,7 +330,10 @@ class OatWriter::OatDexFile {
DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " offset_=" << offset_
-OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCompilationInfo* info)
+OatWriter::OatWriter(bool compiling_boot_image,
+ TimingLogger* timings,
+ ProfileCompilationInfo* info,
+ CompactDexLevel compact_dex_level)
: write_state_(WriteState::kAddingDexFileSources),
timings_(timings),
raw_dex_files_(),
@@ -404,7 +407,8 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
size_method_bss_mappings_(0u),
relative_patcher_(nullptr),
absolute_patch_locations_(),
- profile_compilation_info_(info) {
+ profile_compilation_info_(info),
+ compact_dex_level_(compact_dex_level) {
}
bool OatWriter::AddDexFileSource(const char* filename,
@@ -417,7 +421,7 @@ bool OatWriter::AddDexFileSource(const char* filename,
if (fd.Fd() == -1) {
PLOG(ERROR) << "Failed to read magic number from dex file: '" << filename << "'";
return false;
- } else if (DexFileLoader::IsValidMagic(magic)) {
+ } else if (DexFileLoader::IsMagicValid(magic)) {
// The file is open for reading, not writing, so it's OK to let the File destructor
// close it without checking for explicit Close(), so pass checkUsage = false.
raw_dex_files_.emplace_back(new File(fd.Release(), location, /* checkUsage */ false));
@@ -481,7 +485,7 @@ bool OatWriter::AddVdexDexFilesSource(const VdexFile& vdex_file,
return false;
}
- if (!DexFileLoader::IsValidMagic(current_dex_data)) {
+ if (!DexFileLoader::IsMagicValid(current_dex_data)) {
LOG(ERROR) << "Invalid magic in vdex file created from " << location;
return false;
}
@@ -3110,12 +3114,12 @@ bool OatWriter::ReadDexFileHeader(File* file, OatDexFile* oat_dex_file) {
}
bool OatWriter::ValidateDexFileHeader(const uint8_t* raw_header, const char* location) {
- const bool valid_standard_dex_magic = StandardDexFile::IsMagicValid(raw_header);
+ const bool valid_standard_dex_magic = DexFileLoader::IsMagicValid(raw_header);
if (!valid_standard_dex_magic) {
LOG(ERROR) << "Invalid magic number in dex file header. " << " File: " << location;
return false;
}
- if (!StandardDexFile::IsVersionValid(raw_header)) {
+ if (!DexFileLoader::IsVersionAndMagicValid(raw_header)) {
LOG(ERROR) << "Invalid version number in dex file header. " << " File: " << location;
return false;
}
@@ -3160,7 +3164,8 @@ bool OatWriter::WriteDexFile(OutputStream* out,
if (!SeekToDexFile(out, file, oat_dex_file)) {
return false;
}
- if (profile_compilation_info_ != nullptr) {
+ if (profile_compilation_info_ != nullptr ||
+ compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
CHECK(!update_input_vdex) << "We should never update the input vdex when doing dexlayout";
if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
return false;
@@ -3259,7 +3264,8 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
}
- dex_file = DexFileLoader::OpenDex(dup_fd, location, /* verify_checksum */ true, &error_msg);
+ dex_file = DexFileLoader::OpenDex(
+ dup_fd, location, /* verify */ true, /* verify_checksum */ true, &error_msg);
} else {
// The source data is a vdex file.
CHECK(oat_dex_file->source_.IsRawData())
@@ -3286,6 +3292,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
}
Options options;
options.output_to_memmap_ = true;
+ options.compact_dex_level_ = compact_dex_level_;
DexLayout dex_layout(options, profile_compilation_info_, nullptr);
dex_layout.ProcessDexFile(location.c_str(), dex_file.get(), 0);
std::unique_ptr<MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index c742fd4441..6a82fd1d59 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -24,6 +24,7 @@
#include "base/array_ref.h"
#include "base/dchecked_vector.h"
+#include "cdex/compact_dex_level.h"
#include "linker/relative_patcher.h" // For RelativePatcherTargetProvider.
#include "mem_map.h"
#include "method_reference.h"
@@ -114,7 +115,10 @@ class OatWriter {
kDefault = kCreate
};
- OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCompilationInfo* info);
+ OatWriter(bool compiling_boot_image,
+ TimingLogger* timings,
+ ProfileCompilationInfo* info,
+ CompactDexLevel compact_dex_level);
// To produce a valid oat file, the user must first add sources with any combination of
// - AddDexFileSource(),
@@ -491,6 +495,9 @@ class OatWriter {
// Profile info used to generate new layout of files.
ProfileCompilationInfo* profile_compilation_info_;
+ // Compact dex level that is generated.
+ CompactDexLevel compact_dex_level_;
+
using OrderedMethodList = std::vector<OrderedMethodData>;
// List of compiled methods, sorted by the order defined in OrderedMethodData.
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index a19057a0ed..3efebfd45f 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -127,7 +127,8 @@ class OatTest : public CommonCompilerTest {
TimingLogger timings("WriteElf", false, false);
OatWriter oat_writer(/*compiling_boot_image*/false,
&timings,
- /*profile_compilation_info*/nullptr);
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone);
for (const DexFile* dex_file : dex_files) {
ArrayRef<const uint8_t> raw_dex_file(
reinterpret_cast<const uint8_t*>(&dex_file->GetHeader()),
@@ -148,7 +149,10 @@ class OatTest : public CommonCompilerTest {
bool verify,
ProfileCompilationInfo* profile_compilation_info) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false, &timings, profile_compilation_info);
+ OatWriter oat_writer(/*compiling_boot_image*/false,
+ &timings,
+ profile_compilation_info,
+ CompactDexLevel::kCompactDexLevelNone);
for (const char* dex_filename : dex_filenames) {
if (!oat_writer.AddDexFileSource(dex_filename, dex_filename)) {
return false;
@@ -166,7 +170,8 @@ class OatTest : public CommonCompilerTest {
TimingLogger timings("WriteElf", false, false);
OatWriter oat_writer(/*compiling_boot_image*/false,
&timings,
- /*profile_compilation_info*/nullptr);
+ /*profile_compilation_info*/nullptr,
+ CompactDexLevel::kCompactDexLevelNone);
if (!oat_writer.AddZippedDexFilesSource(std::move(zip_fd), location)) {
return false;
}
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 705043bbeb..4916d643c6 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -29,6 +29,27 @@ art_cc_binary {
],
}
+art_cc_binary {
+ name: "dexdumps",
+ host_supported: true,
+ device_supported: false,
+ srcs: [
+ "dexdump_cfg.cc",
+ "dexdump_main.cc",
+ "dexdump.cc",
+ ],
+ cflags: ["-Wall", "-Werror"],
+ static_libs: [
+ "libart",
+ "libbase",
+ ] + art_static_dependencies,
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
art_cc_test {
name: "art_dexdump_tests",
defaults: [
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 3648a3edd0..4bfd91fdd9 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1883,7 +1883,8 @@ int processFile(const char* fileName) {
const bool kVerifyChecksum = !gOptions.ignoreBadChecksum;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 29c9e92189..3c71770838 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -37,7 +37,10 @@ art_cc_library {
art_cc_library {
name: "libartd-dexlayout",
- defaults: ["libart-dexlayout-defaults"],
+ defaults: [
+ "libart-dexlayout-defaults",
+ "art_debug_defaults",
+ ],
shared_libs: ["libartd"],
}
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 0c944cee2c..8c4ee6e9a1 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -39,24 +39,6 @@ static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_ext
return value;
}
-static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) {
- DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
- PositionInfoVector& positions = debug_info->GetPositionInfo();
- positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_)));
- return false;
-}
-
-static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
- DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
- LocalInfoVector& locals = debug_info->GetLocalInfo();
- const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
- const char* descriptor = entry.descriptor_ != nullptr ? entry.descriptor_ : "";
- const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
- locals.push_back(std::unique_ptr<LocalInfo>(
- new LocalInfo(name, descriptor, signature, entry.start_address_, entry.end_address_,
- entry.reg_)));
-}
-
static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
const uint8_t* stream = debug_info_stream;
DecodeUnsignedLeb128(&stream); // line_start
@@ -421,8 +403,23 @@ EncodedArrayItem* Collections::CreateEncodedArrayItem(const uint8_t* static_data
return encoded_array_item;
}
-AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem* annotation,
- uint32_t offset) {
+void Collections::AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count) {
+ uint32_t current_offset = start_offset;
+ for (size_t i = 0; i < count; ++i) {
+ // If we have not processed this annotation already, add it to the set.
+ const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
+ DCHECK(annotation_item != nullptr);
+ current_offset += annotation_item->GetSize();
+ }
+}
+
+AnnotationItem* Collections::CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation) {
+ const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
+ const uint32_t offset = start_data - dex_file.Begin();
auto found_annotation_item = AnnotationItems().find(offset);
if (found_annotation_item != AnnotationItems().end()) {
return found_annotation_item->second.get();
@@ -431,10 +428,11 @@ AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem*
const uint8_t* annotation_data = annotation->annotation_;
std::unique_ptr<EncodedValue> encoded_value(
ReadEncodedValue(&annotation_data, DexFile::kDexAnnotationAnnotation, 0));
- // TODO: Calculate the size of the annotation.
AnnotationItem* annotation_item =
new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
- annotation_items_.AddItem(annotation_item, offset);
+ annotation_item->SetOffset(offset);
+ annotation_item->SetSize(annotation_data - start_data);
+ annotation_items_.AddItem(annotation_item, annotation_item->GetOffset());
return annotation_item;
}
@@ -455,8 +453,7 @@ AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file,
if (annotation == nullptr) {
continue;
}
- AnnotationItem* annotation_item =
- CreateAnnotationItem(annotation, disk_annotations_item->entries_[i]);
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
items->push_back(annotation_item);
}
AnnotationSetItem* annotation_set_item = new AnnotationSetItem(items);
@@ -694,12 +691,6 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
}
debug_info = code_item->DebugInfo();
}
- if (debug_info != nullptr) {
- bool is_static = (access_flags & kAccStatic) != 0;
- dex_file.DecodeDebugLocalInfo(
- disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
- dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
- }
return new MethodItem(access_flags, method_id, code_item);
}
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 5dcc87dd2e..99a66f348c 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -208,7 +208,8 @@ class Collections {
TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
EncodedArrayItem* CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset);
- AnnotationItem* CreateAnnotationItem(const DexFile::AnnotationItem* annotation, uint32_t offset);
+ AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation);
AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
@@ -216,6 +217,9 @@ class Collections {
CodeItem* CreateCodeItem(
const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+ void AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count);
StringId* GetStringId(uint32_t index) {
CHECK_LT(index, StringIdsSize());
@@ -966,39 +970,6 @@ class CodeItem : public Item {
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
-struct PositionInfo {
- PositionInfo(uint32_t address, uint32_t line) : address_(address), line_(line) { }
-
- uint32_t address_;
- uint32_t line_;
-};
-
-using PositionInfoVector = std::vector<std::unique_ptr<PositionInfo>>;
-
-struct LocalInfo {
- LocalInfo(const char* name,
- const char* descriptor,
- const char* signature,
- uint32_t start_address,
- uint32_t end_address,
- uint16_t reg)
- : name_(name),
- descriptor_(descriptor),
- signature_(signature),
- start_address_(start_address),
- end_address_(end_address),
- reg_(reg) { }
-
- std::string name_;
- std::string descriptor_;
- std::string signature_;
- uint32_t start_address_;
- uint32_t end_address_;
- uint16_t reg_;
-};
-
-using LocalInfoVector = std::vector<std::unique_ptr<LocalInfo>>;
-
class DebugInfoItem : public Item {
public:
DebugInfoItem(uint32_t debug_info_size, uint8_t* debug_info)
@@ -1007,16 +978,10 @@ class DebugInfoItem : public Item {
uint32_t GetDebugInfoSize() const { return debug_info_size_; }
uint8_t* GetDebugInfo() const { return debug_info_.get(); }
- PositionInfoVector& GetPositionInfo() { return positions_; }
- LocalInfoVector& GetLocalInfo() { return locals_; }
-
private:
uint32_t debug_info_size_;
std::unique_ptr<uint8_t[]> debug_info_;
- PositionInfoVector positions_;
- LocalInfoVector locals_;
-
DISALLOW_COPY_AND_ASSIGN(DebugInfoItem);
};
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 8eb726a64a..bd3e1fa718 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -152,6 +152,7 @@ static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* co
break;
case DexFile::kDexTypeAnnotationItem:
collections->SetAnnotationItemsOffset(item->offset_);
+ collections->AddAnnotationsFromMapListSection(dex_file, item->offset_, item->size_);
break;
case DexFile::kDexTypeEncodedArrayItem:
collections->SetEncodedArrayItemsOffset(item->offset_);
diff --git a/dexlayout/dex_verify.cc b/dexlayout/dex_verify.cc
index 54581292ff..18ddc86e0c 100644
--- a/dexlayout/dex_verify.cc
+++ b/dexlayout/dex_verify.cc
@@ -893,109 +893,24 @@ bool VerifyDebugInfo(dex_ir::DebugInfoItem* orig,
}
return true;
}
- if (!VerifyPositionInfo(orig->GetPositionInfo(),
- output->GetPositionInfo(),
- orig->GetOffset(),
- error_msg)) {
+ // TODO: Test for debug equivalence rather than byte array equality.
+ uint32_t orig_size = orig->GetDebugInfoSize();
+ uint32_t output_size = output->GetDebugInfoSize();
+ if (orig_size != output_size) {
+ *error_msg = "DebugInfoSize disagreed.";
return false;
}
- return VerifyLocalInfo(orig->GetLocalInfo(),
- output->GetLocalInfo(),
- orig->GetOffset(),
- error_msg);
-}
-
-bool VerifyPositionInfo(dex_ir::PositionInfoVector& orig,
- dex_ir::PositionInfoVector& output,
- uint32_t orig_offset,
- std::string* error_msg) {
- if (orig.size() != output.size()) {
- *error_msg = StringPrintf(
- "Mismatched number of positions for debug info at offset %x: %zu vs %zu.",
- orig_offset,
- orig.size(),
- output.size());
+ uint8_t* orig_data = orig->GetDebugInfo();
+ uint8_t* output_data = output->GetDebugInfo();
+ if ((orig_data == nullptr && output_data != nullptr) ||
+ (orig_data != nullptr && output_data == nullptr)) {
+ *error_msg = "DebugInfo null/non-null mismatch.";
return false;
}
- for (size_t i = 0; i < orig.size(); ++i) {
- if (orig[i]->address_ != output[i]->address_) {
- *error_msg = StringPrintf(
- "Mismatched position address for debug info at offset %x: %u vs %u.",
- orig_offset,
- orig[i]->address_,
- output[i]->address_);
- return false;
- }
- if (orig[i]->line_ != output[i]->line_) {
- *error_msg = StringPrintf("Mismatched position line for debug info at offset %x: %u vs %u.",
- orig_offset,
- orig[i]->line_,
- output[i]->line_);
- return false;
- }
- }
- return true;
-}
-
-bool VerifyLocalInfo(dex_ir::LocalInfoVector& orig,
- dex_ir::LocalInfoVector& output,
- uint32_t orig_offset,
- std::string* error_msg) {
- if (orig.size() != output.size()) {
- *error_msg = StringPrintf(
- "Mismatched number of locals for debug info at offset %x: %zu vs %zu.",
- orig_offset,
- orig.size(),
- output.size());
+ if (memcmp(orig_data, output_data, orig_size) != 0) {
+ *error_msg = "DebugInfo bytes mismatch.";
return false;
}
- for (size_t i = 0; i < orig.size(); ++i) {
- if (orig[i]->name_ != output[i]->name_) {
- *error_msg = StringPrintf("Mismatched local name for debug info at offset %x: %s vs %s.",
- orig_offset,
- orig[i]->name_.c_str(),
- output[i]->name_.c_str());
- return false;
- }
- if (orig[i]->descriptor_ != output[i]->descriptor_) {
- *error_msg = StringPrintf(
- "Mismatched local descriptor for debug info at offset %x: %s vs %s.",
- orig_offset,
- orig[i]->descriptor_.c_str(),
- output[i]->descriptor_.c_str());
- return false;
- }
- if (orig[i]->signature_ != output[i]->signature_) {
- *error_msg = StringPrintf("Mismatched local signature for debug info at offset %x: %s vs %s.",
- orig_offset,
- orig[i]->signature_.c_str(),
- output[i]->signature_.c_str());
- return false;
- }
- if (orig[i]->start_address_ != output[i]->start_address_) {
- *error_msg = StringPrintf(
- "Mismatched local start address for debug info at offset %x: %u vs %u.",
- orig_offset,
- orig[i]->start_address_,
- output[i]->start_address_);
- return false;
- }
- if (orig[i]->end_address_ != output[i]->end_address_) {
- *error_msg = StringPrintf(
- "Mismatched local end address for debug info at offset %x: %u vs %u.",
- orig_offset,
- orig[i]->end_address_,
- output[i]->end_address_);
- return false;
- }
- if (orig[i]->reg_ != output[i]->reg_) {
- *error_msg = StringPrintf("Mismatched local reg for debug info at offset %x: %u vs %u.",
- orig_offset,
- orig[i]->reg_,
- output[i]->reg_);
- return false;
- }
- }
return true;
}
diff --git a/dexlayout/dex_verify.h b/dexlayout/dex_verify.h
index 58c95d6947..998939bbce 100644
--- a/dexlayout/dex_verify.h
+++ b/dexlayout/dex_verify.h
@@ -100,14 +100,6 @@ bool VerifyCode(dex_ir::CodeItem* orig, dex_ir::CodeItem* output, std::string* e
bool VerifyDebugInfo(dex_ir::DebugInfoItem* orig,
dex_ir::DebugInfoItem* output,
std::string* error_msg);
-bool VerifyPositionInfo(dex_ir::PositionInfoVector& orig,
- dex_ir::PositionInfoVector& output,
- uint32_t orig_offset,
- std::string* error_msg);
-bool VerifyLocalInfo(dex_ir::LocalInfoVector& orig,
- dex_ir::LocalInfoVector& output,
- uint32_t orig_offset,
- std::string* error_msg);
bool VerifyTries(dex_ir::TryItemVector* orig,
dex_ir::TryItemVector* output,
uint32_t orig_offset,
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 11ba2a6357..8c821066d2 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -23,7 +23,9 @@
#include <queue>
#include <vector>
+#include "cdex/compact_dex_file.h"
#include "dex_file_types.h"
+#include "standard_dex_file.h"
#include "utf.h"
namespace art {
@@ -630,7 +632,18 @@ void DexWriter::WriteHeader() {
uint32_t buffer[20];
dex_ir::Collections& collections = header_->GetCollections();
size_t offset = 0;
- offset += Write(header_->Magic(), 8 * sizeof(uint8_t), offset);
+ if (compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
+ static constexpr size_t kMagicAndVersionLen =
+ CompactDexFile::kDexMagicSize + CompactDexFile::kDexVersionLen;
+ uint8_t magic_and_version[kMagicAndVersionLen] = {};
+ CompactDexFile::WriteMagic(&magic_and_version[0]);
+ CompactDexFile::WriteCurrentVersion(&magic_and_version[0]);
+ offset += Write(magic_and_version, kMagicAndVersionLen * sizeof(uint8_t), offset);
+ } else {
+ static constexpr size_t kMagicAndVersionLen =
+ StandardDexFile::kDexMagicSize + StandardDexFile::kDexVersionLen;
+ offset += Write(header_->Magic(), kMagicAndVersionLen * sizeof(uint8_t), offset);
+ }
buffer[0] = header_->Checksum();
offset += Write(buffer, sizeof(uint32_t), offset);
offset += Write(header_->Signature(), 20 * sizeof(uint8_t), offset);
@@ -681,8 +694,8 @@ void DexWriter::WriteMemMap() {
WriteHeader();
}
-void DexWriter::Output(dex_ir::Header* header, MemMap* mem_map) {
- DexWriter dex_writer(header, mem_map);
+void DexWriter::Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level) {
+ DexWriter dex_writer(header, mem_map, compact_dex_level);
dex_writer.WriteMemMap();
}
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index b396adf126..d932b9f006 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -20,6 +20,7 @@
#define ART_DEXLAYOUT_DEX_WRITER_H_
#include "base/unix_file/fd_file.h"
+#include "cdex/compact_dex_level.h"
#include "dex_ir.h"
#include "mem_map.h"
#include "os.h"
@@ -28,9 +29,14 @@ namespace art {
class DexWriter {
public:
- DexWriter(dex_ir::Header* header, MemMap* mem_map) : header_(header), mem_map_(mem_map) { }
+ DexWriter(dex_ir::Header* header,
+ MemMap* mem_map,
+ CompactDexLevel compact_dex_level)
+ : header_(header),
+ mem_map_(mem_map),
+ compact_dex_level_(compact_dex_level) { }
- static void Output(dex_ir::Header* header, MemMap* mem_map);
+ static void Output(dex_ir::Header* header, MemMap* mem_map, CompactDexLevel compact_dex_level);
private:
void WriteMemMap();
@@ -66,6 +72,7 @@ class DexWriter {
dex_ir::Header* const header_;
MemMap* const mem_map_;
+ const CompactDexLevel compact_dex_level_;
DISALLOW_COPY_AND_ASSIGN(DexWriter);
};
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index ade00723fd..9a2ab665ba 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -52,6 +52,11 @@ namespace art {
using android::base::StringPrintf;
+// Setting this to false disables class def layout entirely, which is stronger than strictly
+// necessary to ensure the partial order w.r.t. class derivation. TODO: Re-enable (b/68317550).
+static constexpr bool kChangeClassDefOrder = false;
+
+static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
static constexpr uint32_t kDexCodeItemAlignment = 4;
/*
@@ -820,37 +825,6 @@ void DexLayout::DumpCatches(const dex_ir::CodeItem* code) {
}
/*
- * Dumps all positions table entries associated with the code.
- */
-void DexLayout::DumpPositionInfo(const dex_ir::CodeItem* code) {
- dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
- if (debug_info == nullptr) {
- return;
- }
- std::vector<std::unique_ptr<dex_ir::PositionInfo>>& positions = debug_info->GetPositionInfo();
- for (size_t i = 0; i < positions.size(); ++i) {
- fprintf(out_file_, " 0x%04x line=%d\n", positions[i]->address_, positions[i]->line_);
- }
-}
-
-/*
- * Dumps all locals table entries associated with the code.
- */
-void DexLayout::DumpLocalInfo(const dex_ir::CodeItem* code) {
- dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
- if (debug_info == nullptr) {
- return;
- }
- std::vector<std::unique_ptr<dex_ir::LocalInfo>>& locals = debug_info->GetLocalInfo();
- for (size_t i = 0; i < locals.size(); ++i) {
- dex_ir::LocalInfo* entry = locals[i].get();
- fprintf(out_file_, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
- entry->start_address_, entry->end_address_, entry->reg_,
- entry->name_.c_str(), entry->descriptor_.c_str(), entry->signature_.c_str());
- }
-}
-
-/*
* Dumps a single instruction.
*/
void DexLayout::DumpInstruction(const dex_ir::CodeItem* code,
@@ -1093,9 +1067,59 @@ void DexLayout::DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32
}
/*
+ * Callback for dumping each positions table entry.
+ */
+static bool DumpPositionsCb(void* context, const DexFile::PositionInfo& entry) {
+ FILE* out_file = reinterpret_cast<FILE*>(context);
+ fprintf(out_file, " 0x%04x line=%d\n", entry.address_, entry.line_);
+ return false;
+}
+
+/*
+ * Callback for dumping each locals table entry.
+ */
+static void DumpLocalsCb(void* context, const DexFile::LocalInfo& entry) {
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+ FILE* out_file = reinterpret_cast<FILE*>(context);
+ fprintf(out_file, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
+ entry.start_address_, entry.end_address_, entry.reg_,
+ entry.name_, entry.descriptor_, signature);
+}
+
+/*
+ * Lookup functions.
+ */
+static const char* StringDataByIdx(uint32_t idx, dex_ir::Collections& collections) {
+ dex_ir::StringId* string_id = collections.GetStringIdOrNullPtr(idx);
+ if (string_id == nullptr) {
+ return nullptr;
+ }
+ return string_id->Data();
+}
+
+static const char* StringDataByTypeIdx(uint16_t idx, dex_ir::Collections& collections) {
+ dex_ir::TypeId* type_id = collections.GetTypeIdOrNullPtr(idx);
+ if (type_id == nullptr) {
+ return nullptr;
+ }
+ dex_ir::StringId* string_id = type_id->GetStringId();
+ if (string_id == nullptr) {
+ return nullptr;
+ }
+ return string_id->Data();
+}
+
+
+/*
* Dumps code of a method.
*/
-void DexLayout::DumpCode(uint32_t idx, const dex_ir::CodeItem* code, uint32_t code_offset) {
+void DexLayout::DumpCode(uint32_t idx,
+ const dex_ir::CodeItem* code,
+ uint32_t code_offset,
+ const char* declaring_class_descriptor,
+ const char* method_name,
+ bool is_static,
+ const dex_ir::ProtoId* proto) {
fprintf(out_file_, " registers : %d\n", code->RegistersSize());
fprintf(out_file_, " ins : %d\n", code->InsSize());
fprintf(out_file_, " outs : %d\n", code->OutsSize());
@@ -1111,10 +1135,48 @@ void DexLayout::DumpCode(uint32_t idx, const dex_ir::CodeItem* code, uint32_t co
DumpCatches(code);
// Positions and locals table in the debug info.
+ dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
fprintf(out_file_, " positions : \n");
- DumpPositionInfo(code);
+ if (debug_info != nullptr) {
+ DexFile::DecodeDebugPositionInfo(debug_info->GetDebugInfo(),
+ [this](uint32_t idx) {
+ return StringDataByIdx(idx, this->header_->GetCollections());
+ },
+ DumpPositionsCb,
+ out_file_);
+ }
fprintf(out_file_, " locals : \n");
- DumpLocalInfo(code);
+ if (debug_info != nullptr) {
+ std::vector<const char*> arg_descriptors;
+ const dex_ir::TypeList* parameters = proto->Parameters();
+ if (parameters != nullptr) {
+ const dex_ir::TypeIdVector* parameter_type_vector = parameters->GetTypeList();
+ if (parameter_type_vector != nullptr) {
+ for (const dex_ir::TypeId* type_id : *parameter_type_vector) {
+ arg_descriptors.push_back(type_id->GetStringId()->Data());
+ }
+ }
+ }
+ DexFile::DecodeDebugLocalInfo(debug_info->GetDebugInfo(),
+ "DexLayout in-memory",
+ declaring_class_descriptor,
+ arg_descriptors,
+ method_name,
+ is_static,
+ code->RegistersSize(),
+ code->InsSize(),
+ code->InsnsSize(),
+ [this](uint32_t idx) {
+ return StringDataByIdx(idx, this->header_->GetCollections());
+ },
+ [this](uint32_t idx) {
+ return
+ StringDataByTypeIdx(dchecked_integral_cast<uint16_t>(idx),
+ this->header_->GetCollections());
+ },
+ DumpLocalsCb,
+ out_file_);
+ }
}
/*
@@ -1141,7 +1203,13 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
fprintf(out_file_, " code : (none)\n");
} else {
fprintf(out_file_, " code -\n");
- DumpCode(idx, code, code->GetOffset());
+ DumpCode(idx,
+ code,
+ code->GetOffset(),
+ back_descriptor,
+ name,
+ (flags & kAccStatic) != 0,
+ method_id->Proto());
}
if (options_.disassemble_) {
fputc('\n', out_file_);
@@ -1518,9 +1586,13 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex
std::vector<dex_ir::ClassData*> new_class_data_order;
for (uint32_t i = 0; i < new_class_def_order.size(); ++i) {
dex_ir::ClassDef* class_def = new_class_def_order[i];
- class_def->SetIndex(i);
- class_def->SetOffset(class_defs_offset);
- class_defs_offset += dex_ir::ClassDef::ItemSize();
+ if (kChangeClassDefOrder) {
+ // This produces dex files that violate the spec since the super class class_def is supposed
+ // to occur before any subclasses.
+ class_def->SetIndex(i);
+ class_def->SetOffset(class_defs_offset);
+ class_defs_offset += dex_ir::ClassDef::ItemSize();
+ }
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
class_data->SetOffset(class_data_offset);
@@ -1532,7 +1604,7 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex
return new_class_data_order;
}
-void DexLayout::LayoutStringData(const DexFile* dex_file) {
+int32_t DexLayout::LayoutStringData(const DexFile* dex_file) {
const size_t num_strings = header_->GetCollections().StringIds().size();
std::vector<bool> is_shorty(num_strings, false);
std::vector<bool> from_hot_method(num_strings, false);
@@ -1645,13 +1717,11 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
offset += data->GetSize() + 1; // Add one extra for null.
}
if (offset > max_offset) {
- const uint32_t diff = offset - max_offset;
+ return offset - max_offset;
// If we expanded the string data section, we need to update the offsets or else we will
// corrupt the next section when writing out.
- FixupSections(header_->GetCollections().StringDatasOffset(), diff);
- // Update file size.
- header_->SetFileSize(header_->FileSize() + diff);
}
+ return 0;
}
// Orders code items according to specified class data ordering.
@@ -1731,6 +1801,10 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
}
}
+ // Removing duplicate CodeItems may expose other issues with downstream
+ // optimizations such as quickening. But we need to ensure at least the weak
+ // forms of it currently in use do not break layout optimizations.
+ std::map<dex_ir::CodeItem*, uint32_t> original_code_item_offset;
// Total_diff includes diffs generated by clinits, executed, and non-executed methods.
int32_t total_diff = 0;
// The relative placement has no effect on correctness; it is used to ensure
@@ -1749,11 +1823,22 @@ int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
dex_ir::CodeItem* code_item = method->GetCodeItem();
if (code_item != nullptr &&
code_items_set.find(code_item) != code_items_set.end()) {
- diff += UnsignedLeb128Size(code_item_offset)
- - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(code_item_offset);
- code_item_offset +=
- RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ // Compute where the CodeItem was originally laid out.
+ uint32_t original_offset = code_item->GetOffset();
+ auto it = original_code_item_offset.find(code_item);
+ if (it != original_code_item_offset.end()) {
+ original_offset = it->second;
+ } else {
+ original_code_item_offset[code_item] = code_item->GetOffset();
+ // Assign the new offset and move the pointer to allocate space.
+ code_item->SetOffset(code_item_offset);
+ code_item_offset +=
+ RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+ // Update the size of the encoded methods to reflect that the offset difference
+ // may have changed the ULEB128 length.
+ diff +=
+ UnsignedLeb128Size(code_item->GetOffset()) - UnsignedLeb128Size(original_offset);
}
}
}
@@ -1876,13 +1961,23 @@ void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
}
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
- LayoutStringData(dex_file);
+ const int32_t string_diff = LayoutStringData(dex_file);
+ // If we expanded the string data section, we need to update the offsets or else we will
+ // corrupt the next section when writing out.
+ FixupSections(header_->GetCollections().StringDatasOffset(), string_diff);
+ // Update file size.
+ header_->SetFileSize(header_->FileSize() + string_diff);
+
std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
- int32_t diff = LayoutCodeItems(dex_file, new_class_data_order);
+ const int32_t code_item_diff = LayoutCodeItems(dex_file, new_class_data_order);
// Move sections after ClassData by diff bytes.
- FixupSections(header_->GetCollections().ClassDatasOffset(), diff);
- // Update file size.
- header_->SetFileSize(header_->FileSize() + diff);
+ FixupSections(header_->GetCollections().ClassDatasOffset(), code_item_diff);
+
+ // Update file and data size.
+ // The data size must be aligned to kDataSectionAlignment.
+ const int32_t total_diff = code_item_diff + string_diff;
+ header_->SetDataSize(RoundUp(header_->DataSize() + total_diff, kDataSectionAlignment));
+ header_->SetFileSize(header_->FileSize() + total_diff);
}
void DexLayout::OutputDexFile(const DexFile* dex_file) {
@@ -1923,29 +2018,10 @@ void DexLayout::OutputDexFile(const DexFile* dex_file) {
}
return;
}
- DexWriter::Output(header_, mem_map_.get());
+ DexWriter::Output(header_, mem_map_.get(), options_.compact_dex_level_);
if (new_file != nullptr) {
UNUSED(new_file->FlushCloseOrErase());
}
- // Verify the output dex file's structure for debug builds.
- if (kIsDebugBuild) {
- std::string location = "memory mapped file for " + dex_file_location;
- std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
- mem_map_->Size(),
- location,
- header_->Checksum(),
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg));
- DCHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
- }
- // Do IR-level comparison between input and output. This check ignores potential differences
- // due to layout, so offsets are not checked. Instead, it checks the data contents of each item.
- if (kIsDebugBuild || options_.verify_output_) {
- std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file));
- CHECK(VerifyOutputDexFile(orig_header.get(), header_, &error_msg)) << error_msg;
- }
}
/*
@@ -1977,12 +2053,38 @@ void DexLayout::ProcessDexFile(const char* file_name,
DumpDexFile();
}
- // Output dex file as file or memmap.
+ // In case we are outputting to a file, keep it open so we can verify.
if (options_.output_dex_directory_ != nullptr || options_.output_to_memmap_) {
if (info_ != nullptr) {
LayoutOutputFile(dex_file);
}
OutputDexFile(dex_file);
+
+ // Clear header before verifying to reduce peak RAM usage.
+ header.reset();
+
+ // Verify the output dex file's structure; only enabled by default for debug builds.
+ if (options_.verify_output_) {
+ std::string error_msg;
+ std::string location = "memory mapped file for " + std::string(file_name);
+ std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
+ mem_map_->Size(),
+ location,
+ /* checksum */ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg));
+ CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
+
+ // Do IR-level comparison between input and output. This check ignores potential differences
+ // due to layout, so offsets are not checked. Instead, it checks the data contents of each item.
+ //
+ // Regenerate output IR to catch any bugs that might happen during writing.
+ std::unique_ptr<dex_ir::Header> output_header(dex_ir::DexIrBuilder(*output_dex_file));
+ std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file));
+ CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), &error_msg)) << error_msg;
+ }
}
}
@@ -1999,7 +2101,8 @@ int DexLayout::ProcessFile(const char* file_name) {
const bool verify_checksum = !options_.ignore_bad_checksum_;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
fputs(error_msg.c_str(), stderr);
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 9f6e8a4122..2e897739cc 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -26,6 +26,7 @@
#include <stdint.h>
#include <stdio.h>
+#include "cdex/compact_dex_level.h"
#include "dex_file_layout.h"
#include "dex_ir.h"
#include "mem_map.h"
@@ -59,9 +60,9 @@ class Options {
bool show_section_headers_ = false;
bool show_section_statistics_ = false;
bool verbose_ = false;
- // TODO: Set verify_output_ back to false by default. Was set to true for debugging b/62840842.
- bool verify_output_ = true;
+ bool verify_output_ = kIsDebugBuild;
bool visualize_pattern_ = false;
+ CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
OutputFormat output_format_ = kOutputPlain;
const char* output_dex_directory_ = nullptr;
const char* output_file_name_ = nullptr;
@@ -96,7 +97,13 @@ class DexLayout {
void DumpClass(int idx, char** last_package);
void DumpClassAnnotations(int idx);
void DumpClassDef(int idx);
- void DumpCode(uint32_t idx, const dex_ir::CodeItem* code, uint32_t code_offset);
+ void DumpCode(uint32_t idx,
+ const dex_ir::CodeItem* code,
+ uint32_t code_offset,
+ const char* declaring_class_descriptor,
+ const char* method_name,
+ bool is_static,
+ const dex_ir::ProtoId* proto);
void DumpEncodedAnnotation(dex_ir::EncodedAnnotation* annotation);
void DumpEncodedValue(const dex_ir::EncodedValue* data);
void DumpFileHeader();
@@ -116,7 +123,7 @@ class DexLayout {
std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
int32_t LayoutCodeItems(const DexFile* dex_file,
std::vector<dex_ir::ClassData*> new_class_data_order);
- void LayoutStringData(const DexFile* dex_file);
+ int32_t LayoutStringData(const DexFile* dex_file);
bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
void FixupSections(uint32_t offset, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index f8fa893069..f34e7ecd4b 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -325,7 +325,8 @@ class DexLayoutTest : public CommonRuntimeTest {
std::string error_msg;
bool result = DexFileLoader::Open(input_dex.c_str(),
input_dex,
- false,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
&error_msg,
&dex_files);
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index e5870522a3..c8bc132da0 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -179,7 +179,8 @@ static int processFile(const char* fileName) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(fileName, fileName, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
fputs(error_msg.c_str(), stderr);
fputc('\n', stderr);
return -1;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 938ea5dc2f..7c6a3251f7 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -479,15 +479,23 @@ static const MipsInstruction gMipsInstructions[] = {
{ kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x9, "srli", "kmW" },
{ kMsaMask | (0x3ff << 16), kMsa | (0xbe << 16) | 0x19, "move.v", "km" },
{ kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x19, "splati", "kX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x19, "copy_s", "yX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x19, "copy_u", "yX" },
+ { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x19, "insert", "YD" },
{ kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" },
{ kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x7, "ldi", "kx" },
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
{ kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x14, "ilvl", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x14, "ilvr", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x14, "ilvev", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x14, "ilvod", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x12, "maddv", "Vkmn" },
{ kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x12, "msubv", "Vkmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x1b, "fmadd", "Ukmn" },
{ kMsaMask | (0xf << 22), kMsa | (0x5 << 22) | 0x1b, "fmsub", "Ukmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x15, "hadd_s", "Vkmn" },
+ { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x15, "hadd_u", "Vkmn" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -760,6 +768,31 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
args << i10;
break;
}
+ case 'Y': // MSA df/n - wd[x].
+ {
+ int32_t df_n = (instruction >> 16) & 0x3f;
+ if ((df_n & (0x3 << 4)) == 0) {
+ opcode += ".b";
+ args << 'w' << sa << '[' << (df_n & 0xf) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 3)) == 0) {
+ opcode += ".h";
+ args << 'w' << sa << '[' << (df_n & 0x7) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 2)) == 0) {
+ opcode += ".w";
+ args << 'w' << sa << '[' << (df_n & 0x3) << ']';
+ break;
+ }
+ if ((df_n & (0x3 << 1)) == 0) {
+ opcode += ".d";
+ args << 'w' << sa << '[' << (df_n & 0x1) << ']';
+ }
+ break;
+ }
+ case 'y': args << RegName(sa); break;
}
if (*(args_fmt + 1)) {
args << ", ";
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index bcf007b87a..7064fa3a9b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -58,7 +58,6 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
#include "oat.h"
#include "oat_file-inl.h"
#include "oat_file_manager.h"
@@ -412,6 +411,8 @@ class OatDumper {
return instruction_set_;
}
+ typedef std::vector<std::unique_ptr<const DexFile>> DexFileUniqV;
+
bool Dump(std::ostream& os) {
bool success = true;
const OatHeader& oat_header = oat_file_.GetOatHeader();
@@ -563,14 +564,50 @@ class OatDumper {
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
+ if (!DumpOatDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
+ }
+ }
- // If file export selected skip file analysis
- if (options_.export_dex_location_) {
- if (!ExportDexFile(os, *oat_dex_file)) {
+ if (options_.export_dex_location_) {
+ if (kIsVdexEnabled) {
+ std::string error_msg;
+ std::string vdex_filename = GetVdexFilename(oat_file_.GetLocation());
+ if (!OS::FileExists(vdex_filename.c_str())) {
+ os << "File " << vdex_filename.c_str() << " does not exist\n";
+ return false;
+ }
+
+ DexFileUniqV vdex_dex_files;
+ std::unique_ptr<const VdexFile> vdex_file = OpenVdexUnquicken(vdex_filename,
+ &vdex_dex_files,
+ &error_msg);
+ if (vdex_file.get() == nullptr) {
+ os << "Failed to open vdex file: " << error_msg << "\n";
+ return false;
+ }
+ if (oat_dex_files_.size() != vdex_dex_files.size()) {
+ os << "Dex files number in Vdex file does not match Dex files number in Oat file: "
+ << vdex_dex_files.size() << " vs " << oat_dex_files_.size() << '\n';
+ return false;
+ }
+
+ size_t i = 0;
+ for (const auto& vdex_dex_file : vdex_dex_files) {
+ const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ CHECK(oat_dex_file != nullptr);
+ CHECK(vdex_dex_file != nullptr);
+ if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get())) {
success = false;
}
- } else {
- if (!DumpOatDexFile(os, *oat_dex_file)) {
+ i++;
+ }
+ } else {
+ for (size_t i = 0; i < oat_dex_files_.size(); i++) {
+ const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ CHECK(oat_dex_file != nullptr);
+ if (!ExportDexFile(os, *oat_dex_file, /* vdex_dex_file */ nullptr)) {
success = false;
}
}
@@ -628,6 +665,57 @@ class OatDumper {
return nullptr;
}
+ // Returns nullptr and updates error_msg if the Vdex file cannot be opened; otherwise all Dex
+ // files are fully unquickened and stored in dex_files.
+ std::unique_ptr<const VdexFile> OpenVdexUnquicken(const std::string& vdex_filename,
+ /* out */ DexFileUniqV* dex_files,
+ /* out */ std::string* error_msg) {
+ std::unique_ptr<const File> file(OS::OpenFileForReading(vdex_filename.c_str()));
+ if (file == nullptr) {
+ *error_msg = "Could not open file " + vdex_filename + " for reading.";
+ return nullptr;
+ }
+
+ int64_t vdex_length = file->GetLength();
+ if (vdex_length == -1) {
+ *error_msg = "Could not read the length of file " + vdex_filename;
+ return nullptr;
+ }
+
+ std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+ file->GetLength(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ file->Fd(),
+ /* start offset */ 0,
+ /* low_4gb */ false,
+ vdex_filename.c_str(),
+ error_msg));
+ if (mmap == nullptr) {
+ *error_msg = "Failed to mmap file " + vdex_filename + ": " + *error_msg;
+ return nullptr;
+ }
+
+ std::unique_ptr<VdexFile> vdex_file(new VdexFile(mmap.release()));
+ if (!vdex_file->IsValid()) {
+ *error_msg = "Vdex file is not valid";
+ return nullptr;
+ }
+
+ DexFileUniqV tmp_dex_files;
+ if (!vdex_file->OpenAllDexFiles(&tmp_dex_files, error_msg)) {
+ *error_msg = "Failed to open Dex files from Vdex: " + *error_msg;
+ return nullptr;
+ }
+
+ vdex_file->Unquicken(MakeNonOwningPointerVector(tmp_dex_files),
+ vdex_file->GetQuickeningInfo(),
+ /* decompile_return_instruction */ true);
+
+ *dex_files = std::move(tmp_dex_files);
+ return vdex_file;
+ }
+
struct Stats {
enum ByteKind {
kByteKindCode,
@@ -1024,15 +1112,15 @@ class OatDumper {
return success;
}
- bool ExportDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+ // Backwards compatible Dex file export. If dex_file is nullptr (valid Vdex file not present) the
+ // Dex resource is extracted from the oat_dex_file and its checksum is repaired since it's not
+ // unquickened. Otherwise the dex_file has been fully unquickened and is expected to verify the
+ // original checksum.
+ bool ExportDexFile(std::ostream& os,
+ const OatFile::OatDexFile& oat_dex_file,
+ const DexFile* dex_file) {
std::string error_msg;
std::string dex_file_location = oat_dex_file.GetDexFileLocation();
-
- const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg);
- if (dex_file == nullptr) {
- os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
- return false;
- }
size_t fsize = oat_dex_file.FileSize();
// Some quick checks just in case
@@ -1041,6 +1129,27 @@ class OatDumper {
return false;
}
+ if (dex_file == nullptr) {
+ // Exported bytecode is quickened (dex-to-dex transformations present)
+ dex_file = OpenDexFile(&oat_dex_file, &error_msg);
+ if (dex_file == nullptr) {
+ os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
+ return false;
+ }
+
+ // Recompute checksum
+ reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
+ dex_file->CalculateChecksum();
+ } else {
+ // Vdex unquickened output should match the original input bytecode
+ uint32_t orig_checksum =
+ reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
+ if (orig_checksum != dex_file->CalculateChecksum()) {
+ os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
+ return false;
+ }
+ }
+
// Verify output directory exists
if (!OS::DirectoryExists(options_.export_dex_location_)) {
// TODO: Extend OS::DirectoryExists if symlink support is required
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 7260d7477b..00344691e0 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -70,5 +70,17 @@ TEST_F(OatDumpTest, TestSymbolizeStatic) {
std::string error_msg;
ASSERT_TRUE(Exec(kStatic, kModeSymbolize, {}, kListOnly, &error_msg)) << error_msg;
}
+
+TEST_F(OatDumpTest, TestExportDex) {
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly, &error_msg))
+ << error_msg;
+}
+TEST_F(OatDumpTest, TestExportDexStatic) {
+ TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly, &error_msg))
+ << error_msg;
+}
#endif
} // namespace art
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 1d5c536932..52fe973c1b 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -43,6 +43,24 @@ class OatDumpTest : public CommonRuntimeTest {
CommonRuntimeTest::SetUp();
core_art_location_ = GetCoreArtLocation();
core_oat_location_ = GetSystemImageFilename(GetCoreOatLocation().c_str(), kRuntimeISA);
+ tmp_dir_ = GetScratchDir();
+ }
+
+ virtual void TearDown() {
+ ClearDirectory(tmp_dir_.c_str(), /*recursive*/ false);
+ ASSERT_EQ(rmdir(tmp_dir_.c_str()), 0);
+ CommonRuntimeTest::TearDown();
+ }
+
+ std::string GetScratchDir() {
+ // ANDROID_DATA needs to be set
+ CHECK_NE(static_cast<char*>(nullptr), getenv("ANDROID_DATA"));
+ std::string dir = getenv("ANDROID_DATA");
+ dir += "/oatdump-tmp-dir-XXXXXX";
+ if (mkdtemp(&dir[0]) == nullptr) {
+ PLOG(FATAL) << "mkdtemp(\"" << &dir[0] << "\") failed";
+ }
+ return dir;
}
// Linking flavor.
@@ -217,6 +235,8 @@ class OatDumpTest : public CommonRuntimeTest {
return result;
}
+ std::string tmp_dir_;
+
private:
std::string core_art_location_;
std::string core_oat_location_;
diff --git a/openjdkjvm/Android.bp b/openjdkjvm/Android.bp
index 761df02553..a17899358c 100644
--- a/openjdkjvm/Android.bp
+++ b/openjdkjvm/Android.bp
@@ -20,7 +20,9 @@ cc_defaults {
srcs: ["OpenjdkJvm.cc"],
shared_libs: [
"libbase",
- "libnativehelper",
+ ],
+ header_libs: [
+ "libnativehelper_header_only",
],
}
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index b212ea1c20..29ebefddea 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -53,8 +53,8 @@
#include "mirror/string-inl.h"
#include "monitor.h"
#include "native/scoped_fast_native_object_access-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 84a90d65fd..c6090ef9fc 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -24,6 +24,7 @@ cc_defaults {
defaults: ["art_defaults"],
host_supported: true,
srcs: [
+ "deopt_manager.cc",
"events.cc",
"fixed_up_dex_file.cc",
"object_tagging.cc",
@@ -50,10 +51,12 @@ cc_defaults {
"ti_timers.cc",
"transform.cc",
],
- header_libs: ["libopenjdkjvmti_headers"],
+ header_libs: [
+ "libnativehelper_header_only",
+ "libopenjdkjvmti_headers",
+ ],
shared_libs: [
"libbase",
- "libnativehelper",
],
}
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index b30d45ae88..5f726b16e0 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -71,6 +71,7 @@
namespace openjdkjvmti {
EventHandler gEventHandler;
+DeoptManager gDeoptManager;
#define ENSURE_NON_NULL(n) \
do { \
@@ -1676,7 +1677,8 @@ extern const jvmtiInterface_1 gJvmtiInterface;
ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
: art_vm(runtime),
local_data(nullptr),
- capabilities() {
+ capabilities(),
+ event_info_mutex_("jvmtiEnv_EventInfoMutex") {
object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
functions = &gJvmtiInterface;
}
@@ -1710,6 +1712,7 @@ static jint GetEnvHandler(art::JavaVMExt* vm, /*out*/void** env, jint version) {
extern "C" bool ArtPlugin_Initialize() {
art::Runtime* runtime = art::Runtime::Current();
+ gDeoptManager.Setup();
if (runtime->IsStarted()) {
PhaseUtil::SetToLive();
} else {
@@ -1730,6 +1733,7 @@ extern "C" bool ArtPlugin_Initialize() {
extern "C" bool ArtPlugin_Deinitialize() {
gEventHandler.Shutdown();
+ gDeoptManager.Shutdown();
PhaseUtil::Unregister();
ThreadUtil::Unregister();
ClassUtil::Unregister();
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index ad405e8571..126346088c 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -39,10 +39,12 @@
#include <jni.h>
+#include "deopt_manager.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/strlcpy.h"
+#include "base/mutex.h"
#include "events.h"
#include "java_vm_ext.h"
#include "jni_env_ext.h"
@@ -77,12 +79,15 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// or by putting a list in the ClassExt of a field's DeclaringClass.
// TODO Maybe just have an extension to let one put a watch on every field, that would probably be
// good enough maybe since you probably want either a few or all/almost all of them.
- std::unordered_set<art::ArtField*> access_watched_fields;
- std::unordered_set<art::ArtField*> modify_watched_fields;
+ std::unordered_set<art::ArtField*> access_watched_fields GUARDED_BY(event_info_mutex_);
+ std::unordered_set<art::ArtField*> modify_watched_fields GUARDED_BY(event_info_mutex_);
// Set of breakpoints is unique to each jvmtiEnv.
- std::unordered_set<Breakpoint> breakpoints;
- std::unordered_set<const art::ShadowFrame*> notify_frames;
+ std::unordered_set<Breakpoint> breakpoints GUARDED_BY(event_info_mutex_);
+ std::unordered_set<const art::ShadowFrame*> notify_frames GUARDED_BY(event_info_mutex_);
+
+ // RW lock to protect access to all of the event data.
+ art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
new file mode 100644
index 0000000000..f843054681
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.cc
@@ -0,0 +1,322 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <functional>
+
+#include "deopt_manager.h"
+
+#include "art_jvmti.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/mutex-inl.h"
+#include "dex_file_annotations.h"
+#include "events-inl.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "modifiers.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+#include "ti_phase.h"
+
+namespace openjdkjvmti {
+
+// TODO We should make this much more selective in the future so we only return true when we
+// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
+// we can just assume that we care if we are loaded at all.
+//
+// Even if we don't keep track of this at the method level we might want to keep track of it at the
+// level of enabled capabilities.
+bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(
+ art::ArtMethod* method ATTRIBUTE_UNUSED) {
+ return true;
+}
+
+bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
+ return !manager_->MethodHasBreakpoints(method);
+}
+
+DeoptManager::DeoptManager()
+ : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock"),
+ deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
+ performing_deoptimization_(false),
+ global_deopt_count_(0),
+ deopter_count_(0),
+ inspection_callback_(this) { }
+
+void DeoptManager::Setup() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->AddMethodInspectionCallback(&inspection_callback_);
+}
+
+void DeoptManager::Shutdown() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("remove method Inspection Callback");
+ art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+ callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
+}
+
+bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
+ art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
+ return MethodHasBreakpointsLocked(method);
+}
+
+bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
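+  // With no deoptimization requesters present no agent can have installed a breakpoint yet, so
+  // the map lookup below can be skipped.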
+ if (deopter_count_ == 0) {
+ return false;
+ }
+ auto elem = breakpoint_status_.find(method);
+ return elem != breakpoint_status_.end() && elem->second != 0;
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ RemoveDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddDeoptimizeAllMethods() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ AddDeoptimizeAllMethodsLocked(self);
+}
+
+void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable());
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
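+  // Note (assumption): breakpoints in default interface methods are handled below by deoptimizing
+  // everything rather than just this method, presumably because implementing classes may hold
+  // copies that single-method deoptimization would not cover.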
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+
+ if (MethodHasBreakpointsLocked(method)) {
+ // Don't need to do anything extra.
+ breakpoint_status_[method]++;
+ // Another thread might be deoptimizing the very method we just added new breakpoints for. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ return;
+ }
+ breakpoint_status_[method] = 1;
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ if (instrumentation->IsForcedInterpretOnly()) {
+ // We are already interpreting everything so no need to do anything.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (is_default) {
+ AddDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedDeoptimization(self, method);
+ }
+}
+
+void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
+ DCHECK(method->IsInvokable()) << method->PrettyMethod();
+ DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
+ DCHECK(!method->IsNative()) << method->PrettyMethod();
+
+ art::Thread* self = art::Thread::Current();
+ method = method->GetCanonicalMethod();
+ bool is_default = method->IsDefault();
+
+ art::ScopedThreadSuspension sts(self, art::kSuspended);
+ // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might
+ // need but since that is very heavy we will instead just use a condition variable to make sure we
+ // don't race with ourselves.
+ deoptimization_status_lock_.ExclusiveLock(self);
+
+  DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
+ DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
+ << "breakpoints present!";
+ auto instrumentation = art::Runtime::Current()->GetInstrumentation();
+ breakpoint_status_[method] -= 1;
+ if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
+ // We don't need to do anything since we are interpreting everything anyway.
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ return;
+ } else if (breakpoint_status_[method] == 0) {
+ if (UNLIKELY(is_default)) {
+ RemoveDeoptimizeAllMethodsLocked(self);
+ } else {
+ PerformLimitedUndeoptimization(self, method);
+ }
+ } else {
+ // Another thread might be deoptimizing the very methods we just removed breakpoints from. Wait
+ // for any deopts to finish before moving on.
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) {
+ while (performing_deoptimization_) {
+ deoptimization_condition_.Wait(self);
+ }
+}
+
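+// Called with deoptimization_status_lock_ held; waits out any in-progress (un)deoptimization and
+// then releases the lock, matching the RELEASE annotation in the header.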
+void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) {
+ WaitForDeoptimizationToFinishLocked(self);
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+}
+
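+// RAII helper: entered with deoptimization_status_lock_ held, it waits for any in-flight
+// (un)deoptimization, marks this thread as the one performing deoptimization, drops the lock and
+// suspends all other threads. The destructor resumes the world and wakes any waiting threads.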
+class ScopedDeoptimizationContext : public art::ValueObject {
+ public:
+ ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
+ RELEASE(deopt->deoptimization_status_lock_)
+ ACQUIRE(art::Locks::mutator_lock_)
+ ACQUIRE(art::Roles::uninterruptible_)
+ : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) {
+ deopt_->WaitForDeoptimizationToFinishLocked(self_);
+ DCHECK(!deopt->performing_deoptimization_)
+ << "Already performing deoptimization on another thread!";
+ // Use performing_deoptimization_ to keep track of the lock.
+ deopt_->performing_deoptimization_ = true;
+ deopt_->deoptimization_status_lock_.Unlock(self_);
+ art::Runtime::Current()->GetThreadList()->SuspendAll("JMVTI Deoptimizing methods",
+ /*long_suspend*/ false);
+ uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods");
+ }
+
+ ~ScopedDeoptimizationContext()
+ RELEASE(art::Locks::mutator_lock_)
+ RELEASE(art::Roles::uninterruptible_) {
+ // Can be suspended again.
+ self_->EndAssertNoThreadSuspension(uninterruptible_cause_);
+ // Release the mutator lock.
+ art::Runtime::Current()->GetThreadList()->ResumeAll();
+ // Let other threads know it's fine to proceed.
+ art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
+ deopt_->performing_deoptimization_ = false;
+ deopt_->deoptimization_condition_.Broadcast(self_);
+ }
+
+ private:
+ art::Thread* self_;
+ DeoptManager* deopt_;
+ const char* uninterruptible_cause_;
+};
+
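+// Global deoptimization requests are reference counted: only the 0 -> 1 and 1 -> 0 transitions
+// change the instrumentation; all other requests just wait for any in-flight change to finish.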
+void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
+ global_deopt_count_++;
+ if (global_deopt_count_ == 1) {
+ PerformGlobalDeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
+  DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
+ global_deopt_count_--;
+ if (global_deopt_count_ == 0) {
+ PerformGlobalUndeoptimization(self);
+ } else {
+ WaitForDeoptimizationToFinish(self);
+ }
+}
+
+void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Deoptimize(method);
+}
+
+void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
+}
+
+void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
+ kDeoptManagerInstrumentationKey);
+}
+
+
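+// Deoptimization users are also reference counted: instrumentation support for deoptimization is
+// enabled when the first requester arrives and disabled again when the last one goes away.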
+void DeoptManager::RemoveDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange sts(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present";
+ deopter_count_--;
+ if (deopter_count_ == 0) {
+ ScopedDeoptimizationContext sdc(self, this);
+ // TODO Give this a real key.
+ art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization("");
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::AddDeoptimizationRequester() {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedThreadStateChange stsc(self, art::kSuspended);
+ deoptimization_status_lock_.ExclusiveLock(self);
+ deopter_count_++;
+ if (deopter_count_ == 1) {
+ ScopedDeoptimizationContext sdc(self, this);
+ art::Runtime::Current()->GetInstrumentation()->EnableDeoptimization();
+ return;
+ } else {
+ deoptimization_status_lock_.ExclusiveUnlock(self);
+ }
+}
+
+void DeoptManager::DeoptimizeThread(art::Thread* target) {
+ art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
+}
+
+extern DeoptManager gDeoptManager;
+DeoptManager* DeoptManager::Get() {
+ return &gDeoptManager;
+}
+
+} // namespace openjdkjvmti
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
new file mode 100644
index 0000000000..b265fa8ec2
--- /dev/null
+++ b/openjdkjvmti/deopt_manager.h
@@ -0,0 +1,168 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+#define ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
+
+#include <unordered_map>
+
+#include "jni.h"
+#include "jvmti.h"
+
+#include "base/mutex.h"
+#include "runtime_callbacks.h"
+#include "ti_breakpoint.h"
+
+namespace art {
+class ArtMethod;
+namespace mirror {
+class Class;
+} // namespace mirror
+} // namespace art
+
+namespace openjdkjvmti {
+
+class DeoptManager;
+
+struct JvmtiMethodInspectionCallback : public art::MethodInspectionCallback {
+ public:
+ explicit JvmtiMethodInspectionCallback(DeoptManager* manager) : manager_(manager) {}
+
+ bool IsMethodBeingInspected(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ bool IsMethodSafeToJit(art::ArtMethod* method)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ DeoptManager* manager_;
+};
+
+class ScopedDeoptimizationContext;
+
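+// Tracks, across all jvmtiEnvs, which methods have breakpoints and how many users currently need
+// deoptimization, and drives the runtime instrumentation to (un)deoptimize methods accordingly.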
+class DeoptManager {
+ public:
+ DeoptManager();
+
+ void Setup();
+ void Shutdown();
+
+ void RemoveDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ void AddDeoptimizationRequester() REQUIRES(!deoptimization_status_lock_,
+ !art::Roles::uninterruptible_);
+ bool MethodHasBreakpoints(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_);
+
+ void RemoveMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddMethodBreakpoint(art::ArtMethod* method)
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethods()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ static DeoptManager* Get();
+
+ private:
+ bool MethodHasBreakpointsLocked(art::ArtMethod* method)
+ REQUIRES(deoptimization_status_lock_);
+
+  // Wait until no thread is in the middle of deoptimizing/undeoptimizing anything. This is needed
+  // to keep everything synchronized since threads need to drop the deoptimization_status_lock_
+  // while actually deoptimizing methods.
+ void WaitForDeoptimizationToFinish(art::Thread* self)
+ RELEASE(deoptimization_status_lock_) REQUIRES(!art::Locks::mutator_lock_);
+
+ void WaitForDeoptimizationToFinishLocked(art::Thread* self)
+ REQUIRES(deoptimization_status_lock_, !art::Locks::mutator_lock_);
+
+ void AddDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void RemoveDeoptimizeAllMethodsLocked(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalDeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformGlobalUndeoptimization(art::Thread* self)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ void PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method)
+ RELEASE(deoptimization_status_lock_)
+ REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
+
+ static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
+ // static constexpr const char* kDeoptManagerThreadName = "JVMTI_DeoptManagerWorkerThread";
+
+ art::Mutex deoptimization_status_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
+ bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of times we have gotten requests to deopt everything.
+ uint32_t global_deopt_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // Number of users of deoptimization there currently are.
+ uint32_t deopter_count_ GUARDED_BY(deoptimization_status_lock_);
+
+ // A map from methods to the number of breakpoints in them from all envs.
+ std::unordered_map<art::ArtMethod*, uint32_t> breakpoint_status_
+ GUARDED_BY(deoptimization_status_lock_);
+
+ // The MethodInspectionCallback we use to tell the runtime if we care about particular methods.
+ JvmtiMethodInspectionCallback inspection_callback_;
+
+ // Helper for setting up/tearing-down for deoptimization.
+ friend class ScopedDeoptimizationContext;
+};
+
+} // namespace openjdkjvmti
+#endif // ART_OPENJDKJVMTI_DEOPT_MANAGER_H_
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index ab8e6def2d..7f77f90862 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -21,9 +21,10 @@
#include <type_traits>
#include <tuple>
+#include "base/mutex-inl.h"
#include "events.h"
#include "jni_internal.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "ti_breakpoint.h"
@@ -276,6 +277,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kBreakpoint>(
jthread jni_thread ATTRIBUTE_UNUSED,
jmethodID jmethod,
jlocation location) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
art::ArtMethod* method = art::jni::DecodeArtMethod(jmethod);
return ShouldDispatchOnThread<ArtJvmtiEvent::kBreakpoint>(env, thread) &&
env->breakpoints.find({method, location}) != env->breakpoints.end();
@@ -292,6 +294,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFramePop>(
const art::ShadowFrame* frame) const {
// Search for the frame. Do this before checking if we need to send the event so that we don't
// have to deal with use-after-free or the frames being reallocated later.
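+  // Take the lock in writer mode since the erase() below mutates notify_frames.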
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return env->notify_frames.erase(frame) != 0 &&
ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
}
@@ -313,6 +316,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldModification>(
jfieldID field,
char type_char ATTRIBUTE_UNUSED,
jvalue val ATTRIBUTE_UNUSED) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return ShouldDispatchOnThread<ArtJvmtiEvent::kFieldModification>(env, thread) &&
env->modify_watched_fields.find(
art::jni::DecodeArtField(field)) != env->modify_watched_fields.end();
@@ -329,6 +333,7 @@ inline bool EventHandler::ShouldDispatch<ArtJvmtiEvent::kFieldAccess>(
jclass field_klass ATTRIBUTE_UNUSED,
jobject object ATTRIBUTE_UNUSED,
jfieldID field) const {
+ art::ReaderMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return ShouldDispatchOnThread<ArtJvmtiEvent::kFieldAccess>(env, thread) &&
env->access_watched_fields.find(
art::jni::DecodeArtField(field)) != env->access_watched_fields.end();
@@ -475,6 +480,7 @@ inline bool EventHandler::NeedsEventUpdate(ArtJvmTiEnv* env,
ArtJvmtiEvent event = added ? ArtJvmtiEvent::kClassFileLoadHookNonRetransformable
: ArtJvmtiEvent::kClassFileLoadHookRetransformable;
return (added && caps.can_access_local_variables == 1) ||
+ caps.can_generate_breakpoint_events == 1 ||
(caps.can_retransform_classes == 1 &&
IsEventEnabledAnywhere(event) &&
env->event_masks.IsEnabledAnywhere(event));
@@ -492,6 +498,9 @@ inline void EventHandler::HandleChangedCapabilities(ArtJvmTiEnv* env,
if (added && caps.can_access_local_variables == 1) {
HandleLocalAccessCapabilityAdded();
}
+ if (caps.can_generate_breakpoint_events == 1) {
+ HandleBreakpointEventsChanged(added);
+ }
}
}
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 0282fbce1f..6a64441a4a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -37,6 +37,7 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/logging.h"
+#include "deopt_manager.h"
#include "dex_file_types.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
@@ -49,7 +50,7 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "monitor.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
@@ -810,9 +811,49 @@ static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
}
}
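+// Returns whether enabling this event requires deoptimizing every method; breakpoint and
+// exception events can get by without a full deopt.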
+static bool EventNeedsFullDeopt(ArtJvmtiEvent event) {
+ switch (event) {
+ case ArtJvmtiEvent::kBreakpoint:
+ case ArtJvmtiEvent::kException:
+ return false;
+ // TODO We should support more of these or at least do something to make them discriminate by
+ // thread.
+ case ArtJvmtiEvent::kMethodEntry:
+ case ArtJvmtiEvent::kExceptionCatch:
+ case ArtJvmtiEvent::kMethodExit:
+ case ArtJvmtiEvent::kFieldModification:
+ case ArtJvmtiEvent::kFieldAccess:
+ case ArtJvmtiEvent::kSingleStep:
+ case ArtJvmtiEvent::kFramePop:
+ return true;
+ default:
+ LOG(FATAL) << "Unexpected event type!";
+ UNREACHABLE();
+ }
+}
+
static void SetupTraceListener(JvmtiMethodTraceListener* listener,
ArtJvmtiEvent event,
bool enable) {
+ bool needs_full_deopt = EventNeedsFullDeopt(event);
+ // Make sure we can deopt.
+ {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ DeoptManager* deopt_manager = DeoptManager::Get();
+ if (enable) {
+ deopt_manager->AddDeoptimizationRequester();
+ if (needs_full_deopt) {
+ deopt_manager->AddDeoptimizeAllMethods();
+ }
+ } else {
+ if (needs_full_deopt) {
+ deopt_manager->RemoveDeoptimizeAllMethods();
+ }
+ deopt_manager->RemoveDeoptimizationRequester();
+ }
+ }
+
+ // Add the actual listeners.
art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
uint32_t new_events = GetInstrumentationEventsFor(event);
art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
@@ -821,11 +862,6 @@ static void SetupTraceListener(JvmtiMethodTraceListener* listener,
art::gc::kCollectorTypeInstrumentation);
art::ScopedSuspendAll ssa("jvmti method tracing installation");
if (enable) {
- // TODO Depending on the features being used we should be able to avoid deoptimizing everything
- // like we do here.
- if (!instr->AreAllMethodsDeoptimized()) {
- instr->EnableMethodTracing("jvmti-tracing", /*needs_interpreter*/true);
- }
instr->AddListener(listener, new_events);
} else {
instr->RemoveListener(listener, new_events);
@@ -910,6 +946,7 @@ void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
}
// FramePop can never be disabled once it's been turned on since we would either need to deal
// with dangling pointers or have missed events.
+ // TODO We really need to make this not the case anymore.
case ArtJvmtiEvent::kFramePop:
if (!enable || (enable && frame_pop_enabled)) {
break;
@@ -1046,6 +1083,14 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
return ERR(NONE);
}
+void EventHandler::HandleBreakpointEventsChanged(bool added) {
+ if (added) {
+ DeoptManager::Get()->AddDeoptimizationRequester();
+ } else {
+ DeoptManager::Get()->RemoveDeoptimizationRequester();
+ }
+}
+
void EventHandler::Shutdown() {
// Need to remove the method_trace_listener_ if it's there.
art::Thread* self = art::Thread::Current();
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index a062e1589e..aed24e59f3 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -232,6 +232,7 @@ class EventHandler {
void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
+ void HandleBreakpointEventsChanged(bool enable);
bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
diff --git a/openjdkjvmti/jvmti_weak_table-inl.h b/openjdkjvmti/jvmti_weak_table-inl.h
index 1c82255fff..5d20946070 100644
--- a/openjdkjvmti/jvmti_weak_table-inl.h
+++ b/openjdkjvmti/jvmti_weak_table-inl.h
@@ -44,7 +44,7 @@
#include "jvmti_allocator.h"
#include "mirror/class.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_breakpoint.cc b/openjdkjvmti/ti_breakpoint.cc
index f5116a8080..8e5b56e9bf 100644
--- a/openjdkjvmti/ti_breakpoint.cc
+++ b/openjdkjvmti/ti_breakpoint.cc
@@ -36,13 +36,15 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/mutex-inl.h"
+#include "deopt_manager.h"
#include "dex_file_annotations.h"
#include "events-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "modifiers.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -63,16 +65,29 @@ Breakpoint::Breakpoint(art::ArtMethod* m, jlocation loc) : method_(m), location_
void BreakpointUtil::RemoveBreakpointsInClass(ArtJvmTiEnv* env, art::mirror::Class* klass) {
std::vector<Breakpoint> to_remove;
- for (const Breakpoint& b : env->breakpoints) {
- if (b.GetMethod()->GetDeclaringClass() == klass) {
- to_remove.push_back(b);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ for (const Breakpoint& b : env->breakpoints) {
+ if (b.GetMethod()->GetDeclaringClass() == klass) {
+ to_remove.push_back(b);
+ }
+ }
+ for (const Breakpoint& b : to_remove) {
+ auto it = env->breakpoints.find(b);
+ DCHECK(it != env->breakpoints.end());
+ env->breakpoints.erase(it);
}
}
- for (const Breakpoint& b : to_remove) {
- auto it = env->breakpoints.find(b);
- DCHECK(it != env->breakpoints.end());
- env->breakpoints.erase(it);
+ if (!to_remove.empty()) {
+ LOG(WARNING) << "Methods with breakpoints potentially not being un-deoptimized.";
}
+ // TODO Figure out how to do this.
+ // DeoptManager* deopt = DeoptManager::Get();
+ // for (const Breakpoint& b : to_remove) {
+ // // TODO It might be good to send these all at once instead.
+ // // deopt->RemoveMethodBreakpointSuspended(b.GetMethod());
+ // LOG(WARNING) << "not un-deopting methods! :-0";
+ // }
}
jvmtiError BreakpointUtil::SetBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -80,19 +95,23 @@ jvmtiError BreakpointUtil::SetBreakpoint(jvmtiEnv* jenv, jmethodID method, jloca
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
if (location < 0 || static_cast<uint32_t>(location) >=
art_method->GetCodeItem()->insns_size_in_code_units_) {
return ERR(INVALID_LOCATION);
}
- auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
- if (!res_pair.second) {
- // Didn't get inserted because it's already present!
- return ERR(DUPLICATE);
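+  // Register the breakpoint with the DeoptManager (deoptimizing the method) before publishing it
+  // in env->breakpoints; the registration is rolled back below if it turns out to be a duplicate.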
+ DeoptManager::Get()->AddMethodBreakpoint(art_method);
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto res_pair = env->breakpoints.insert(/* Breakpoint */ {art_method, location});
+ if (LIKELY(res_pair.second)) {
+ return OK;
+ }
}
- return OK;
+ // Didn't get inserted because it's already present!
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
+ return ERR(DUPLICATE);
}
jvmtiError BreakpointUtil::ClearBreakpoint(jvmtiEnv* jenv, jmethodID method, jlocation location) {
@@ -100,14 +119,17 @@ jvmtiError BreakpointUtil::ClearBreakpoint(jvmtiEnv* jenv, jmethodID method, jlo
if (method == nullptr) {
return ERR(INVALID_METHODID);
}
- // Need to get mutator_lock_ so we can find the interface version of any default methods.
art::ScopedObjectAccess soa(art::Thread::Current());
- auto pos = env->breakpoints.find(
- /* Breakpoint */ {art::jni::DecodeArtMethod(method)->GetCanonicalMethod(), location});
- if (pos == env->breakpoints.end()) {
- return ERR(NOT_FOUND);
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method)->GetCanonicalMethod();
+ {
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
+ auto pos = env->breakpoints.find(/* Breakpoint */ {art_method, location});
+ if (pos == env->breakpoints.end()) {
+ return ERR(NOT_FOUND);
+ }
+ env->breakpoints.erase(pos);
}
- env->breakpoints.erase(pos);
+ DeoptManager::Get()->RemoveMethodBreakpoint(art_method);
return OK;
}
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 5f29416134..e69c78bab1 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -60,7 +60,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "primitive.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_class_loader.cc b/openjdkjvmti/ti_class_loader.cc
index e81e4bc803..b551b55e18 100644
--- a/openjdkjvmti/ti_class_loader.cc
+++ b/openjdkjvmti/ti_class_loader.cc
@@ -51,7 +51,7 @@
#include "mirror/class.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "object_lock.h"
#include "runtime.h"
#include "transform.h"
diff --git a/openjdkjvmti/ti_field.cc b/openjdkjvmti/ti_field.cc
index c45b926695..b8691837eb 100644
--- a/openjdkjvmti/ti_field.cc
+++ b/openjdkjvmti/ti_field.cc
@@ -189,6 +189,7 @@ jvmtiError FieldUtil::IsFieldSynthetic(jvmtiEnv* env ATTRIBUTE_UNUSED,
jvmtiError FieldUtil::SetFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -205,6 +206,7 @@ jvmtiError FieldUtil::SetFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jf
jvmtiError FieldUtil::ClearFieldModificationWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -221,6 +223,7 @@ jvmtiError FieldUtil::ClearFieldModificationWatch(jvmtiEnv* jenv, jclass klass,
jvmtiError FieldUtil::SetFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
@@ -237,6 +240,7 @@ jvmtiError FieldUtil::SetFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID
jvmtiError FieldUtil::ClearFieldAccessWatch(jvmtiEnv* jenv, jclass klass, jfieldID field) {
ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+ art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
if (klass == nullptr) {
return ERR(INVALID_CLASS);
}
diff --git a/openjdkjvmti/ti_field.h b/openjdkjvmti/ti_field.h
index 8a229ed19d..3cf29f099a 100644
--- a/openjdkjvmti/ti_field.h
+++ b/openjdkjvmti/ti_field.h
@@ -35,6 +35,8 @@
#include "jni.h"
#include "jvmti.h"
+#include "art_jvmti.h"
+
namespace openjdkjvmti {
class FieldUtil {
@@ -61,10 +63,14 @@ class FieldUtil {
jfieldID field,
jboolean* is_synthetic_ptr);
- static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
- static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field);
+ static jvmtiError SetFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError SetFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+ static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field)
+ REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
};
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 50402a04a9..5d63285825 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -47,7 +47,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "modifiers.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
@@ -86,21 +86,6 @@ struct TiMethodCallback : public art::MethodCallback {
TiMethodCallback gMethodCallback;
-// TODO We should make this much more selective in the future so we only return true when we
-// actually care about the method (i.e. had locals changed, have breakpoints, etc.). For now though
-// we can just assume that we care we are loaded at all.
-//
-// Even if we don't keep track of this at the method level we might want to keep track of it at the
-// level of enabled capabilities.
-struct TiMethodInspectionCallback : public art::MethodInspectionCallback {
- bool IsMethodBeingInspected(art::ArtMethod* method ATTRIBUTE_UNUSED)
- OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return true;
- }
-};
-
-TiMethodInspectionCallback gMethodInspectionCallback;
-
void MethodUtil::Register(EventHandler* handler) {
gMethodCallback.event_handler = handler;
art::ScopedThreadStateChange stsc(art::Thread::Current(),
@@ -108,7 +93,6 @@ void MethodUtil::Register(EventHandler* handler) {
art::ScopedSuspendAll ssa("Add method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->AddMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
void MethodUtil::Unregister() {
@@ -117,7 +101,6 @@ void MethodUtil::Unregister() {
art::ScopedSuspendAll ssa("Remove method callback");
art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
callbacks->RemoveMethodCallback(&gMethodCallback);
- callbacks->AddMethodInspectionCallback(&gMethodInspectionCallback);
}
jvmtiError MethodUtil::GetBytecodes(jvmtiEnv* env,
@@ -779,13 +762,15 @@ jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalVariableClosure c(self, depth, slot, type, val);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
@@ -906,13 +891,15 @@ jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
// Suspend JIT since it can get confused if we deoptimize methods getting jitted.
art::jit::ScopedJitSuspend suspend_jit;
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
SetLocalVariableClosure c(self, depth, slot, type, val);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
@@ -963,13 +950,15 @@ jvmtiError MethodUtil::GetLocalInstance(jvmtiEnv* env ATTRIBUTE_UNUSED,
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
GetLocalInstanceClosure c(self, depth, data);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&c)) {
return ERR(THREAD_NOT_ALIVE);
} else {
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 5a38f46901..5881f8c7a9 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -334,10 +334,11 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
}
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
struct GetContendedMonitorClosure : public art::Closure {
@@ -393,6 +394,7 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
jobject* out_;
};
GetContendedMonitorClosure closure(self, monitor);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
diff --git a/openjdkjvmti/ti_phase.cc b/openjdkjvmti/ti_phase.cc
index 07cf31c354..23df27fbda 100644
--- a/openjdkjvmti/ti_phase.cc
+++ b/openjdkjvmti/ti_phase.cc
@@ -34,7 +34,7 @@
#include "art_jvmti.h"
#include "base/macros.h"
#include "events-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/openjdkjvmti/ti_properties.cc b/openjdkjvmti/ti_properties.cc
index c412814d8d..4fb3070e93 100644
--- a/openjdkjvmti/ti_properties.cc
+++ b/openjdkjvmti/ti_properties.cc
@@ -35,8 +35,8 @@
#include <vector>
#include "jni.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "art_jvmti.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 53abfbca00..c4f16f5e2d 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -62,7 +62,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "non_debuggable_classes.h"
#include "object_lock.h"
#include "runtime.h"
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index bafc8552b1..fe12a25151 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -44,7 +44,7 @@
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/string.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -227,7 +227,8 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
- if (!art::DexFileLoader::Open(segment, segment, true, &error_msg, &dex_files)) {
+ if (!art::DexFileLoader::Open(
+ segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index e0c139954d..e346e16f92 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -53,7 +53,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "ti_thread.h"
@@ -220,28 +220,33 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (max_frame_count < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (frame_buffer == nullptr || count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
if (max_frame_count == 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
*count_ptr = 0;
return ERR(NONE);
}
@@ -251,23 +256,29 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
GetStackTraceDirectClosure closure(frame_buffer,
static_cast<size_t>(start_depth),
static_cast<size_t>(max_frame_count));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
*count_ptr = static_cast<jint>(closure.index);
if (closure.index < static_cast<size_t>(start_depth)) {
return ERR(ILLEGAL_ARGUMENT);
}
return ERR(NONE);
- }
-
- GetStackTraceVectorClosure closure(0, 0);
- thread->RequestSynchronousCheckpoint(&closure);
+ } else {
+ GetStackTraceVectorClosure closure(0, 0);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
- return TranslateFrameVector(closure.frames,
- start_depth,
- closure.start_result,
- max_frame_count,
- frame_buffer,
- count_ptr);
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
+ }
}
template <typename Data>
@@ -678,25 +689,29 @@ jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (count_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetFrameCountClosure closure;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!thread->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
@@ -760,29 +775,36 @@ jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
// It is not great that we have to hold these locks for so long, but it is necessary to ensure
// that the thread isn't dying on us.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
art::Thread* thread;
jvmtiError thread_error = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return thread_error;
}
DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(THREAD_NOT_ALIVE);
}
if (depth < 0) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(ILLEGAL_ARGUMENT);
}
if (method_ptr == nullptr || location_ptr == nullptr) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
return ERR(NULL_POINTER);
}
GetLocationClosure closure(static_cast<size_t>(depth));
- thread->RequestSynchronousCheckpoint(&closure);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
+ if (!thread->RequestSynchronousCheckpoint(&closure)) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
if (closure.method == nullptr) {
return ERR(NO_MORE_FRAMES);
@@ -891,17 +913,21 @@ static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
MonitorInfoClosure<Fn> closure(soa, handle_results);
bool called_method = false;
{
- art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
}
if (target != self) {
called_method = true;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (!target->RequestSynchronousCheckpoint(&closure)) {
return ERR(THREAD_NOT_ALIVE);
}
+ } else {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
}
}
// Cannot call the closure on the current thread if we have thread_list_lock since we need to call
@@ -1024,9 +1050,12 @@ jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth)
method,
visitor.GetDexPc());
}
- // Mark shadow frame as needs_notify_pop_
- shadow_frame->SetNotifyPop(true);
- tienv->notify_frames.insert(shadow_frame);
+ {
+ art::WriterMutexLock lk(self, tienv->event_info_mutex_);
+ // Mark shadow frame as needs_notify_pop_
+ shadow_frame->SetNotifyPop(true);
+ tienv->notify_frames.insert(shadow_frame);
+ }
   // Make sure we will go to the interpreter and use the shadow frames.
if (needs_instrument) {
art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 9a809df011..99dea540e5 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -43,7 +43,7 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -340,47 +340,92 @@ static jint GetJvmtiThreadStateFromInternal(const InternalThreadState& state) {
jint jvmti_state = JVMTI_THREAD_STATE_ALIVE;
if (state.thread_user_code_suspend_count != 0) {
+ // Suspended can be set with any thread state so check it here. Even if the thread isn't in
+ // kSuspended state it will move to that once it hits a checkpoint so we can still set this.
jvmti_state |= JVMTI_THREAD_STATE_SUSPENDED;
// Note: We do not have data about the previous state. Otherwise we should load the previous
// state here.
}
if (state.native_thread->IsInterrupted()) {
+ // Interrupted can be set with any thread state so check it here.
jvmti_state |= JVMTI_THREAD_STATE_INTERRUPTED;
}
- if (internal_thread_state == art::ThreadState::kNative) {
- jvmti_state |= JVMTI_THREAD_STATE_IN_NATIVE;
- }
-
- if (internal_thread_state == art::ThreadState::kRunnable ||
- internal_thread_state == art::ThreadState::kWaitingWeakGcRootRead ||
- internal_thread_state == art::ThreadState::kSuspended) {
- jvmti_state |= JVMTI_THREAD_STATE_RUNNABLE;
- } else if (internal_thread_state == art::ThreadState::kBlocked) {
- jvmti_state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
- } else {
- // Should be in waiting state.
- jvmti_state |= JVMTI_THREAD_STATE_WAITING;
-
- if (internal_thread_state == art::ThreadState::kTimedWaiting ||
- internal_thread_state == art::ThreadState::kSleeping) {
- jvmti_state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;
- } else {
- jvmti_state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;
- }
-
- if (internal_thread_state == art::ThreadState::kSleeping) {
- jvmti_state |= JVMTI_THREAD_STATE_SLEEPING;
- }
-
- if (internal_thread_state == art::ThreadState::kTimedWaiting ||
- internal_thread_state == art::ThreadState::kWaiting) {
- jvmti_state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;
- }
-
- // TODO: PARKED. We'll have to inspect the stack.
+ // Enumerate all the thread states and fill in the other bits. This contains the results of
+ // following the decision tree in the JVMTI spec GetThreadState documentation.
+ switch (internal_thread_state) {
+ case art::ThreadState::kRunnable:
+ case art::ThreadState::kWaitingWeakGcRootRead:
+ case art::ThreadState::kSuspended:
+ // These are all simply runnable.
+ // kRunnable is self-explanatory.
+ // kWaitingWeakGcRootRead is set during some operations with strings due to the intern-table
+ // so we want to keep it marked as runnable.
+      // kSuspended is not reported as suspended here since, without a user_code_suspend_count,
+      // the suspension was done by the GC rather than by JVMTI and so cannot be removed by
+      // ResumeThread.
+ jvmti_state |= JVMTI_THREAD_STATE_RUNNABLE;
+ break;
+ case art::ThreadState::kNative:
+ // kNative means native and runnable. Technically THREAD_STATE_IN_NATIVE can be set with any
+ // state but we don't have the information to know if it should be present for any but the
+ // kNative state.
+ jvmti_state |= (JVMTI_THREAD_STATE_IN_NATIVE |
+ JVMTI_THREAD_STATE_RUNNABLE);
+ break;
+ case art::ThreadState::kBlocked:
+ // Blocked is one of the top level states so it sits alone.
+ jvmti_state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
+ break;
+ case art::ThreadState::kWaiting:
+ // Object.wait() so waiting, indefinitely, in object.wait.
+ jvmti_state |= (JVMTI_THREAD_STATE_WAITING |
+ JVMTI_THREAD_STATE_WAITING_INDEFINITELY |
+ JVMTI_THREAD_STATE_IN_OBJECT_WAIT);
+ break;
+ case art::ThreadState::kTimedWaiting:
+ // Object.wait(long) so waiting, with timeout, in object.wait.
+ jvmti_state |= (JVMTI_THREAD_STATE_WAITING |
+ JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT |
+ JVMTI_THREAD_STATE_IN_OBJECT_WAIT);
+ break;
+ case art::ThreadState::kSleeping:
+      // In Thread.sleep(). This is a timed wait caused by sleep.
+ jvmti_state |= (JVMTI_THREAD_STATE_WAITING |
+ JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT |
+ JVMTI_THREAD_STATE_SLEEPING);
+ break;
+ // TODO We might want to print warnings if we have the debugger running while JVMTI agents are
+ // attached.
+ case art::ThreadState::kWaitingForDebuggerSend:
+ case art::ThreadState::kWaitingForDebuggerToAttach:
+ case art::ThreadState::kWaitingInMainDebuggerLoop:
+ case art::ThreadState::kWaitingForDebuggerSuspension:
+ case art::ThreadState::kWaitingForLockInflation:
+ case art::ThreadState::kWaitingForTaskProcessor:
+ case art::ThreadState::kWaitingForGcToComplete:
+ case art::ThreadState::kWaitingForCheckPointsToRun:
+ case art::ThreadState::kWaitingPerformingGc:
+ case art::ThreadState::kWaitingForJniOnLoad:
+ case art::ThreadState::kWaitingInMainSignalCatcherLoop:
+ case art::ThreadState::kWaitingForSignalCatcherOutput:
+ case art::ThreadState::kWaitingForDeoptimization:
+ case art::ThreadState::kWaitingForMethodTracingStart:
+ case art::ThreadState::kWaitingForVisitObjects:
+ case art::ThreadState::kWaitingForGetObjectsAllocated:
+ case art::ThreadState::kWaitingForGcThreadFlip:
+      // All of these cause the thread to wait for an indeterminate amount of time, but the wait
+      // is not caused by sleep, park, or Object#wait.
+ jvmti_state |= (JVMTI_THREAD_STATE_WAITING |
+ JVMTI_THREAD_STATE_WAITING_INDEFINITELY);
+ break;
+ case art::ThreadState::kStarting:
+ case art::ThreadState::kTerminated:
+ // We only call this if we are alive so we shouldn't see either of these states.
+ LOG(FATAL) << "Should not be in state " << internal_thread_state;
+ UNREACHABLE();
}
+ // TODO: PARKED. We'll have to inspect the stack.
return jvmti_state;
}
@@ -661,7 +706,7 @@ static void* AgentCallback(void* arg) {
// We already have a peer. So call our special Attach function.
art::Thread* self = art::Thread::Attach("JVMTI Agent thread", true, data->thread);
- CHECK(self != nullptr);
+ CHECK(self != nullptr) << "threads_being_born_ should have ensured thread could be attached.";
// The name in Attach() is only for logging. Set the thread name. This is important so
// that the thread is no longer seen as starting up.
{
@@ -674,6 +719,13 @@ static void* AgentCallback(void* arg) {
env->DeleteGlobalRef(data->thread);
data->thread = nullptr;
+ {
+ // The StartThreadBirth was called in the parent thread. We let the runtime know we are up
+ // before going into the provided code.
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ art::Runtime::Current()->EndThreadBirth();
+ }
+
// Run the agent code.
data->proc(data->jvmti_env, env, const_cast<void*>(data->arg));
@@ -703,6 +755,21 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
return ERR(NULL_POINTER);
}
+ {
+ art::Runtime* runtime = art::Runtime::Current();
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ if (runtime->IsShuttingDownLocked()) {
+ // The runtime is shutting down, so we cannot create new threads.
+ // TODO: It's not fully clear from the spec what we should do here. We aren't yet in
+ // JVMTI_PHASE_DEAD, so we cannot return ERR(WRONG_PHASE), but creating new threads is now
+ // impossible. Existing agents generally don't seem to do anything with this return value, so
+ // it doesn't matter too much. We could do something like sending a fake ThreadStart event
+ // even though the agent code never actually runs.
+ return ERR(INTERNAL);
+ }
+ runtime->StartThreadBirth();
+ }
+
std::unique_ptr<AgentData> data(new AgentData);
data->arg = arg;
data->proc = proc;
@@ -714,10 +781,14 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread,
- nullptr,
- &AgentCallback,
- reinterpret_cast<void*>(data.get()));
+ nullptr,
+ &AgentCallback,
+ reinterpret_cast<void*>(data.get()));
if (pthread_create_result != 0) {
+ // On success the new thread calls EndThreadBirth; pthread_create failed, so undo the birth here.
+ art::Runtime* runtime = art::Runtime::Current();
+ art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_);
+ runtime->EndThreadBirth();
return ERR(INTERNAL);
}
data.release();
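The accounting introduced in this function must balance on every path: the creator counts the birth before pthread_create, and exactly one EndThreadBirth follows, either in the new thread (AgentCallback) on success or in the creator on failure. A toy model of that invariant in plain C++ (an editorial sketch, not ART code; names are illustrative):

    #include <mutex>
    #include <system_error>
    #include <thread>

    struct BirthCounter {
      std::mutex lock;
      int births_in_progress = 0;
      void Start() { std::lock_guard<std::mutex> g(lock); ++births_in_progress; }
      void End() { std::lock_guard<std::mutex> g(lock); --births_in_progress; }
    };

    // The creator counts the birth first; only the failure path undoes it locally.
    bool SpawnAgentThread(BirthCounter& counter) {
      counter.Start();
      try {
        std::thread([&counter] {
          counter.End();  // Success path: the new thread ends its own birth,
                          // then runs the agent-provided code.
        }).detach();
        return true;
      } catch (const std::system_error&) {
        counter.End();  // Failure path: the creator ends the birth instead.
        return false;
      }
    }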
@@ -963,12 +1034,14 @@ jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(INVALID_OBJECT);
}
art::Handle<art::mirror::Throwable> exc(hs.NewHandle(obj->AsThrowable()));
- art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ art::Locks::thread_list_lock_->ExclusiveLock(self);
art::Thread* target = nullptr;
jvmtiError err = ERR(INTERNAL);
if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return err;
} else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+ art::Locks::thread_list_lock_->ExclusiveUnlock(self);
return ERR(THREAD_NOT_ALIVE);
}
struct StopThreadClosure : public art::Closure {
@@ -987,6 +1060,7 @@ jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
art::Handle<art::mirror::Throwable> exception_;
};
StopThreadClosure c(exc);
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
if (target->RequestSynchronousCheckpoint(&c)) {
return OK;
} else {
diff --git a/profman/profman.cc b/profman/profman.cc
index 8ccf7b4c1d..4c4bb87e49 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -331,6 +331,7 @@ class ProfMan FINAL {
if (use_apk_fd_list) {
if (DexFileLoader::OpenZip(apks_fd_[i],
dex_locations_[i],
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
@@ -341,6 +342,7 @@ class ProfMan FINAL {
} else {
if (DexFileLoader::Open(apk_files_[i].c_str(),
dex_locations_[i],
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index ddfbed4499..a615437985 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -46,6 +46,7 @@ cc_defaults {
"base/timing_logger.cc",
"base/unix_file/fd_file.cc",
"base/unix_file/random_access_file_utils.cc",
+ "cdex/compact_dex_file.cc",
"cha.cc",
"check_jni.cc",
"class_linker.cc",
@@ -405,9 +406,10 @@ cc_defaults {
],
header_libs: [
"art_cmdlineparser_headers",
+ "libnativehelper_header_only",
+ "jni_platform_headers",
],
shared_libs: [
- "libnativehelper",
"libnativebridge",
"libnativeloader",
"libbacktrace",
@@ -510,6 +512,9 @@ art_cc_library {
"libbase",
"libbacktrace",
],
+ header_libs: [
+ "libnativehelper_header_only",
+ ],
}
art_cc_test {
@@ -548,6 +553,7 @@ art_cc_test {
"base/transform_iterator_test.cc",
"base/variant_map_test.cc",
"base/unix_file/fd_file_test.cc",
+ "cdex/compact_dex_file_test.cc",
"cha_test.cc",
"class_linker_test.cc",
"class_loader_context_test.cc",
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 50095ae77e..fa51059d3a 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -173,4 +173,30 @@
.set pop
.endm
+// This utility macro is used to check whether the address contained in
+// a register is suitably aligned. Default usage is to confirm that the
+// address stored in $sp is a multiple of 16. It can be used for other
+// alignments, and for other base address registers, if needed.
+//
+// Enable this macro by running the shell command:
+//
+// export ART_MIPS32_CHECK_ALIGNMENT=true
+//
+// NOTE: The value of alignment must be a power of 2, and must fit in an
+// unsigned 15-bit integer. The macro won't behave as expected if these
+// conditions aren't met.
+//
+.macro CHECK_ALIGNMENT ba=$sp, tmp=$at, alignment=16
+#ifdef ART_MIPS32_CHECK_ALIGNMENT
+ .set push
+ .set noat
+ .set noreorder
+ andi \tmp, \ba, \alignment-1
+ beqz \tmp, .+12 # Skip break instruction if base address register (ba) is aligned
+ nop
+ break
+ .set pop
+#endif
+.endm
+
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
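The new macro is, roughly, the assembly counterpart of the following C++ check (an editorial sketch; __builtin_trap() stands in for the MIPS break instruction used by the macro):

    #include <cstdint>

    // Trap if 'addr' is not a multiple of 'alignment'; 'alignment' must be a power of two.
    inline void CheckAlignment(uintptr_t addr, uintptr_t alignment = 16) {
      if ((addr & (alignment - 1)) != 0) {
        __builtin_trap();  // Corresponds to the 'break' instruction in the macro.
      }
    }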
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 2edd63f58a..bec52384ac 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -19,7 +19,7 @@
#include "asm_support.h"
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 96
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 112
#define FRAME_SIZE_SAVE_REFS_ONLY 48
#define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
#define FRAME_SIZE_SAVE_EVERYTHING 256
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index ca1de0ae2a..3f362de7ce 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -42,7 +42,16 @@ void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
+ // If the $ZERO register shows up in the list of registers to
+ // be saved, it was included only to align the floating point
+ // register save locations to addresses which are multiples of
+ // 8. We only store the address of a register in gprs_ if the
+ // register is not $ZERO, since $ZERO is read-only and there is
+ // never a reason to save it on the stack.
+ if (core_reg != 0u) {
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
+ }
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
@@ -97,7 +106,9 @@ extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
void MipsContext::DoLongJump() {
uintptr_t gprs[kNumberOfCoreRegisters];
- uint32_t fprs[kNumberOfFRegisters];
+ // Align fprs[] so that art_quick_do_long_jump() can load FPU
+ // registers from it using the ldc1 instruction.
+ uint32_t fprs[kNumberOfFRegisters] __attribute__((aligned(8)));
for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : MipsContext::kBadGprBase + i;
}
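For reference, the GCC-style attribute used above has a standard C++11 spelling; a sketch of the equivalent declaration (the array length is spelled out here only for illustration):

    #include <cstdint>

    // An 8-byte-aligned base address lets ldc1/sdc1 access each register pair directly.
    alignas(8) uint32_t fprs[32];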
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index 5c950717c4..2c0e75090d 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -28,8 +28,9 @@ ENTRY art_jni_dlsym_lookup_stub
.cfi_adjust_cfa_offset 48
sw $ra, 32($sp)
.cfi_rel_offset 31, 32
- SDu $f14, $f15, 24, $sp, $t0
- SDu $f12, $f13, 16, $sp, $t0
+ CHECK_ALIGNMENT $sp, $t0
+ sdc1 $f14, 24($sp)
+ sdc1 $f12, 16($sp)
sw $a3, 12($sp)
.cfi_rel_offset 7, 12
sw $a2, 8($sp)
@@ -45,8 +46,9 @@ ENTRY art_jni_dlsym_lookup_stub
lw $a1, 4($sp)
lw $a2, 8($sp)
lw $a3, 12($sp)
- LDu $f12, $f13, 16, $sp, $t0
- LDu $f14, $f15, 24, $sp, $t0
+ CHECK_ALIGNMENT $sp, $t0
+ ldc1 $f12, 16($sp)
+ ldc1 $f14, 24($sp)
lw $ra, 32($sp)
beq $v0, $zero, .Lno_native_code_found
addiu $sp, $sp, 48 # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index f6204bd8b6..ee3f17d06a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -37,45 +37,49 @@
* Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- addiu $sp, $sp, -96
- .cfi_adjust_cfa_offset 96
+ addiu $sp, $sp, -112
+ .cfi_adjust_cfa_offset 112
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 96)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 112)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
#endif
- sw $ra, 92($sp)
- .cfi_rel_offset 31, 92
- sw $s8, 88($sp)
- .cfi_rel_offset 30, 88
- sw $gp, 84($sp)
- .cfi_rel_offset 28, 84
- sw $s7, 80($sp)
- .cfi_rel_offset 23, 80
- sw $s6, 76($sp)
- .cfi_rel_offset 22, 76
- sw $s5, 72($sp)
- .cfi_rel_offset 21, 72
- sw $s4, 68($sp)
- .cfi_rel_offset 20, 68
- sw $s3, 64($sp)
- .cfi_rel_offset 19, 64
- sw $s2, 60($sp)
- .cfi_rel_offset 18, 60
- sw $s1, 56($sp)
- .cfi_rel_offset 17, 56
- sw $s0, 52($sp)
- .cfi_rel_offset 16, 52
-
- SDu $f30, $f31, 44, $sp, $t1
- SDu $f28, $f29, 36, $sp, $t1
- SDu $f26, $f27, 28, $sp, $t1
- SDu $f24, $f25, 20, $sp, $t1
- SDu $f22, $f23, 12, $sp, $t1
- SDu $f20, $f21, 4, $sp, $t1
-
- # 1 word for holding Method*
+ sw $ra, 108($sp)
+ .cfi_rel_offset 31, 108
+ sw $s8, 104($sp)
+ .cfi_rel_offset 30, 104
+ sw $gp, 100($sp)
+ .cfi_rel_offset 28, 100
+ sw $s7, 96($sp)
+ .cfi_rel_offset 23, 96
+ sw $s6, 92($sp)
+ .cfi_rel_offset 22, 92
+ sw $s5, 88($sp)
+ .cfi_rel_offset 21, 88
+ sw $s4, 84($sp)
+ .cfi_rel_offset 20, 84
+ sw $s3, 80($sp)
+ .cfi_rel_offset 19, 80
+ sw $s2, 76($sp)
+ .cfi_rel_offset 18, 76
+ sw $s1, 72($sp)
+ .cfi_rel_offset 17, 72
+ sw $s0, 68($sp)
+ .cfi_rel_offset 16, 68
+ // 4-byte placeholder for register $zero, used to align the
+ // following double precision floating point register saves.
+
+ CHECK_ALIGNMENT $sp, $t1
+ sdc1 $f30, 56($sp)
+ sdc1 $f28, 48($sp)
+ sdc1 $f26, 40($sp)
+ sdc1 $f24, 32($sp)
+ sdc1 $f22, 24($sp)
+ sdc1 $f20, 16($sp)
+
+ # 1 word for holding Method* plus 12 bytes of padding to keep the contents of SP
+ # a multiple of 16.
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
@@ -216,12 +220,13 @@
.cfi_rel_offset 6, 60
sw $a1, 56($sp)
.cfi_rel_offset 5, 56
- SDu $f18, $f19, 48, $sp, $t8
- SDu $f16, $f17, 40, $sp, $t8
- SDu $f14, $f15, 32, $sp, $t8
- SDu $f12, $f13, 24, $sp, $t8
- SDu $f10, $f11, 16, $sp, $t8
- SDu $f8, $f9, 8, $sp, $t8
+ CHECK_ALIGNMENT $sp, $t8
+ sdc1 $f18, 48($sp)
+ sdc1 $f16, 40($sp)
+ sdc1 $f14, 32($sp)
+ sdc1 $f12, 24($sp)
+ sdc1 $f10, 16($sp)
+ sdc1 $f8, 8($sp)
# bottom will hold Method*
.endm
@@ -320,12 +325,13 @@
lw $a2, 60($sp)
.cfi_restore 6
RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
- LDu $f18, $f19, 48, $sp, $t8
- LDu $f16, $f17, 40, $sp, $t8
- LDu $f14, $f15, 32, $sp, $t8
- LDu $f12, $f13, 24, $sp, $t8
- LDu $f10, $f11, 16, $sp, $t8
- LDu $f8, $f9, 8, $sp, $t8
+ CHECK_ALIGNMENT $sp, $t8
+ ldc1 $f18, 48($sp)
+ ldc1 $f16, 40($sp)
+ ldc1 $f14, 32($sp)
+ ldc1 $f12, 24($sp)
+ ldc1 $f10, 16($sp)
+ ldc1 $f8, 8($sp)
addiu $sp, $sp, 112 # Pop frame.
.cfi_adjust_cfa_offset -112
.endm
@@ -412,22 +418,23 @@
1:
.cpload $ra
- SDu $f30, $f31, 136, $sp, $t1
- SDu $f28, $f29, 128, $sp, $t1
- SDu $f26, $f27, 120, $sp, $t1
- SDu $f24, $f25, 112, $sp, $t1
- SDu $f22, $f23, 104, $sp, $t1
- SDu $f20, $f21, 96, $sp, $t1
- SDu $f18, $f19, 88, $sp, $t1
- SDu $f16, $f17, 80, $sp, $t1
- SDu $f14, $f15, 72, $sp, $t1
- SDu $f12, $f13, 64, $sp, $t1
- SDu $f10, $f11, 56, $sp, $t1
- SDu $f8, $f9, 48, $sp, $t1
- SDu $f6, $f7, 40, $sp, $t1
- SDu $f4, $f5, 32, $sp, $t1
- SDu $f2, $f3, 24, $sp, $t1
- SDu $f0, $f1, 16, $sp, $t1
+ CHECK_ALIGNMENT $sp, $t1
+ sdc1 $f30, 136($sp)
+ sdc1 $f28, 128($sp)
+ sdc1 $f26, 120($sp)
+ sdc1 $f24, 112($sp)
+ sdc1 $f22, 104($sp)
+ sdc1 $f20, 96($sp)
+ sdc1 $f18, 88($sp)
+ sdc1 $f16, 80($sp)
+ sdc1 $f14, 72($sp)
+ sdc1 $f12, 64($sp)
+ sdc1 $f10, 56($sp)
+ sdc1 $f8, 48($sp)
+ sdc1 $f6, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f2, 24($sp)
+ sdc1 $f0, 16($sp)
# 3 words padding and 1 word for holding Method*
@@ -460,22 +467,23 @@
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
- LDu $f30, $f31, 136, $sp, $t1
- LDu $f28, $f29, 128, $sp, $t1
- LDu $f26, $f27, 120, $sp, $t1
- LDu $f24, $f25, 112, $sp, $t1
- LDu $f22, $f23, 104, $sp, $t1
- LDu $f20, $f21, 96, $sp, $t1
- LDu $f18, $f19, 88, $sp, $t1
- LDu $f16, $f17, 80, $sp, $t1
- LDu $f14, $f15, 72, $sp, $t1
- LDu $f12, $f13, 64, $sp, $t1
- LDu $f10, $f11, 56, $sp, $t1
- LDu $f8, $f9, 48, $sp, $t1
- LDu $f6, $f7, 40, $sp, $t1
- LDu $f4, $f5, 32, $sp, $t1
- LDu $f2, $f3, 24, $sp, $t1
- LDu $f0, $f1, 16, $sp, $t1
+ CHECK_ALIGNMENT $sp, $t1
+ ldc1 $f30, 136($sp)
+ ldc1 $f28, 128($sp)
+ ldc1 $f26, 120($sp)
+ ldc1 $f24, 112($sp)
+ ldc1 $f22, 104($sp)
+ ldc1 $f20, 96($sp)
+ ldc1 $f18, 88($sp)
+ ldc1 $f16, 80($sp)
+ ldc1 $f14, 72($sp)
+ ldc1 $f12, 64($sp)
+ ldc1 $f10, 56($sp)
+ ldc1 $f8, 48($sp)
+ ldc1 $f6, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f2, 24($sp)
+ ldc1 $f0, 16($sp)
lw $ra, 252($sp)
.cfi_restore 31
@@ -665,7 +673,8 @@ ENTRY art_quick_osr_stub
b .Losr_exit
sw $v1, 4($a2) # store v0/v1 into result
.Losr_fp_result:
- SDu $f0, $f1, 0, $a2, $t0 # store f0/f1 into result
+ CHECK_ALIGNMENT $a2, $t0, 8
+ sdc1 $f0, 0($a2) # store f0/f1 into result
.Losr_exit:
lw $ra, 44($sp)
.cfi_restore 31
@@ -701,26 +710,28 @@ ENTRY art_quick_osr_stub
END art_quick_osr_stub
/*
- * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
+ * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_.
+ * Note that fprs_ is expected to be an address that is a multiple of 8.
* FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
*/
ENTRY art_quick_do_long_jump
- LDu $f0, $f1, 0*8, $a1, $t1
- LDu $f2, $f3, 1*8, $a1, $t1
- LDu $f4, $f5, 2*8, $a1, $t1
- LDu $f6, $f7, 3*8, $a1, $t1
- LDu $f8, $f9, 4*8, $a1, $t1
- LDu $f10, $f11, 5*8, $a1, $t1
- LDu $f12, $f13, 6*8, $a1, $t1
- LDu $f14, $f15, 7*8, $a1, $t1
- LDu $f16, $f17, 8*8, $a1, $t1
- LDu $f18, $f19, 9*8, $a1, $t1
- LDu $f20, $f21, 10*8, $a1, $t1
- LDu $f22, $f23, 11*8, $a1, $t1
- LDu $f24, $f25, 12*8, $a1, $t1
- LDu $f26, $f27, 13*8, $a1, $t1
- LDu $f28, $f29, 14*8, $a1, $t1
- LDu $f30, $f31, 15*8, $a1, $t1
+ CHECK_ALIGNMENT $a1, $t1, 8
+ ldc1 $f0, 0*8($a1)
+ ldc1 $f2, 1*8($a1)
+ ldc1 $f4, 2*8($a1)
+ ldc1 $f6, 3*8($a1)
+ ldc1 $f8, 4*8($a1)
+ ldc1 $f10, 5*8($a1)
+ ldc1 $f12, 6*8($a1)
+ ldc1 $f14, 7*8($a1)
+ ldc1 $f16, 8*8($a1)
+ ldc1 $f18, 9*8($a1)
+ ldc1 $f20, 10*8($a1)
+ ldc1 $f22, 11*8($a1)
+ ldc1 $f24, 12*8($a1)
+ ldc1 $f26, 13*8($a1)
+ ldc1 $f28, 14*8($a1)
+ ldc1 $f30, 15*8($a1)
.set push
.set nomacro
@@ -1067,7 +1078,8 @@ loopEnd:
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result
5:
- SDu $f0, $f1, 0, $t0, $t1 # store floating point result
+ CHECK_ALIGNMENT $t0, $t1, 8
+ sdc1 $f0, 0($t0) # store floating point result
jalr $zero, $ra
nop
@@ -1225,7 +1237,8 @@ loopEndS:
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result
6:
- SDu $f0, $f1, 0, $t0, $t1 # store floating point result
+ CHECK_ALIGNMENT $t0, $t1, 8
+ sdc1 $f0, 0($t0) # store floating point result
jalr $zero, $ra
nop
@@ -2252,7 +2265,7 @@ ENTRY art_quick_generic_jni_trampoline
move $a0, rSELF # pass Thread::Current
move $a2, $v0 # pass result
move $a3, $v1
- addiu $sp, $sp, -24 # reserve arg slots
+ addiu $sp, $sp, -32 # reserve arg slots
la $t9, artQuickGenericJniEndTrampoline
jalr $t9
s.d $f0, 16($sp) # pass result_f
@@ -3243,7 +3256,8 @@ ENTRY art_quick_invoke_polymorphic
lhu $v0, 16($sp) # Move char from JValue result to return value register.
.Lstore_double_result:
.Lstore_float_result:
- LDu $f0, $f1, 16, $sp, $t0 # Move double/float from JValue result to return value register.
+ CHECK_ALIGNMENT $sp, $t0
+ ldc1 $f0, 16($sp) # Move double/float from JValue result to return value register.
b .Lcleanup_and_return
nop
.Lstore_long_result:
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 45a21ab942..8c86252152 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -35,8 +35,24 @@ static constexpr uint32_t kMipsCalleeSaveRefSpills =
static constexpr uint32_t kMipsCalleeSaveArgSpills =
(1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) | (1 << art::mips::T0) |
(1 << art::mips::T1);
+// We want to save all floating point register pairs at addresses
+// which are multiples of 8 so that the SDu/LDu macros can be
+// replaced with single sdc1/ldc1 instructions for storing/loading
+// floating point register values. Because integer registers are
+// stored at the top of the frame, the number of integer registers
+// saved must be even for the floating point register pairs to be
+// aligned on multiples of 8. Previously, the only case in which we
+// saved floating point registers beneath an odd number of integer
+// registers was when "type" is CalleeSaveType::kSaveAllCalleeSaves.
+// (There are other cases in which an odd number of integer
+// registers are saved, but those cases don't save any floating
+// point registers, so the parity doesn't matter there.) To save an
+// even number of integer registers in this particular case, we add
+// the ZERO register to the list of registers which get saved.
static constexpr uint32_t kMipsCalleeSaveAllSpills =
- (1 << art::mips::S0) | (1 << art::mips::S1);
+ (1 << art::mips::ZERO) | (1 << art::mips::S0) | (1 << art::mips::S1);
static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
(1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
(1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
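With the $ZERO placeholder included, the arithmetic behind the new FRAME_SIZE_SAVE_ALL_CALLEE_SAVES value of 112 works out as follows (an editorial sketch using the register counts visible in this patch; the constant names are illustrative):

    #include <cstddef>

    constexpr size_t kCoreSpillBytes = 12 * 4;  // RA, S8, GP, S7..S0, plus the ZERO placeholder.
    constexpr size_t kFpSpillBytes = 12 * 4;    // F20..F31, stored as six aligned 8-byte pairs.
    constexpr size_t kMethodSlot = 4;           // ArtMethod* at the bottom of the frame.

    constexpr size_t RoundUp16(size_t x) { return (x + 15u) & ~static_cast<size_t>(15u); }

    static_assert(RoundUp16(kCoreSpillBytes + kFpSpillBytes + kMethodSlot) == 112,
                  "kSaveAllCalleeSaves frame size");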
diff --git a/runtime/base/bit_struct.h b/runtime/base/bit_struct.h
index 1f86ee1917..16b555e1c6 100644
--- a/runtime/base/bit_struct.h
+++ b/runtime/base/bit_struct.h
@@ -130,6 +130,18 @@ struct BitStructField {
return kBitWidth;
}
+ BitStructField& operator=(const BitStructField& other) {
+ // Warning: the default operator= would overwrite the entire storage!
+ return *this = static_cast<T>(other);
+ }
+
+ BitStructField(const BitStructField& other) {
+ Assign(*this, static_cast<T>(other));
+ }
+
+ BitStructField() = default;
+ ~BitStructField() = default;
+
protected:
template <typename T2>
T2& Assign(T2& what, T value) {
@@ -265,7 +277,11 @@ using BitStructUint =
#define BITSTRUCT_DEFINE_START(name, bitwidth) \
union name { \
art::detail::DefineBitStructSize<(bitwidth)> _; \
- static constexpr size_t BitStructSizeOf() { return (bitwidth); }
+ static constexpr size_t BitStructSizeOf() { return (bitwidth); } \
+ name& operator=(const name& other) { _ = other._; return *this; } \
+ name(const name& other) : _(other._) {} \
+ name() = default; \
+ ~name() = default;
// End the definition of a bitstruct, and insert a sanity check
// to ensure that the bitstruct did not exceed the specified size.
diff --git a/runtime/base/bit_struct_detail.h b/runtime/base/bit_struct_detail.h
index 9f629c0970..824d7df652 100644
--- a/runtime/base/bit_struct_detail.h
+++ b/runtime/base/bit_struct_detail.h
@@ -56,20 +56,6 @@ namespace detail {
/* else */ type_unsigned>::type;
};
- // Ensure the minimal type storage for 'T' matches its declared BitStructSizeOf.
- // Nominally used by the BITSTRUCT_DEFINE_END macro.
- template <typename T>
- static constexpr bool ValidateBitStructSize() {
- const size_t kBitStructSizeOf = BitStructSizeOf<T>();
- const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
- ? kBitsPerByte
- : RoundUpToPowerOfTwo(kBitStructSizeOf);
-
- // Ensure no extra fields were added in between START/END.
- const size_t kActualSize = sizeof(T) * kBitsPerByte;
- return kExpectedSize == kActualSize;
- }
-
// Denotes the beginning of a bit struct.
//
// This marker is required by the C++ standard in order to
@@ -84,6 +70,49 @@ namespace detail {
private:
typename MinimumTypeUnsignedHelper<kSize>::type _;
};
+
+ // Check if type "T" has a member called _ in it.
+ template <typename T>
+ struct HasUnderscoreField {
+ private:
+ using TrueT = std::integral_constant<bool, true>::type;
+ using FalseT = std::integral_constant<bool, false>::type;
+
+ template <typename C>
+ static constexpr auto Test(void*) -> decltype(std::declval<C>()._, TrueT{}); // NOLINT
+
+ template <typename>
+ static constexpr FalseT Test(...);
+
+ public:
+ static constexpr bool value = decltype(Test<T>(0))::value;
+ };
+
+ // Infer the type of the member of &T::M.
+ template <typename T, typename M>
+ M GetMemberType(M T:: *);
+
+ // Ensure the minimal type storage for 'T' matches its declared BitStructSizeOf.
+ // Nominally used by the BITSTRUCT_DEFINE_END macro.
+ template <typename T>
+ static constexpr bool ValidateBitStructSize() {
+ static_assert(std::is_union<T>::value, "T must be union");
+ static_assert(std::is_standard_layout<T>::value, "T must be standard-layout");
+ static_assert(HasUnderscoreField<T>::value, "T must have the _ DefineBitStructSize");
+
+ const size_t kBitStructSizeOf = BitStructSizeOf<T>();
+ static_assert(std::is_same<decltype(GetMemberType(&T::_)),
+ DefineBitStructSize<kBitStructSizeOf>>::value,
+ "T::_ must be a DefineBitStructSize of the same size");
+
+ const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
+ ? kBitsPerByte
+ : RoundUpToPowerOfTwo(kBitStructSizeOf);
+
+ // Ensure no extra fields were added in between START/END.
+ const size_t kActualSize = sizeof(T) * kBitsPerByte;
+ return kExpectedSize == kActualSize;
+ }
} // namespace detail
} // namespace art
diff --git a/runtime/base/bit_struct_test.cc b/runtime/base/bit_struct_test.cc
index 872ada324c..a80d39eb91 100644
--- a/runtime/base/bit_struct_test.cc
+++ b/runtime/base/bit_struct_test.cc
@@ -70,17 +70,10 @@ struct CustomBitStruct {
int8_t data;
};
-template <typename T>
-void ZeroInitialize(T& value) {
- memset(&value, 0, sizeof(T));
- // TODO: replace with value initialization
-}
-
TEST(BitStructs, Custom) {
CustomBitStruct expected(0b1111);
- BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f;
- ZeroInitialize(f);
+ BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f{}; // NOLINT
EXPECT_EQ(1u, sizeof(f));
@@ -102,8 +95,7 @@ TEST(BitStructs, TwoCustom) {
VALIDATE_BITSTRUCT_SIZE(TestTwoCustom);
- TestTwoCustom cst;
- ZeroInitialize(cst);
+ TestTwoCustom cst{}; // NOLINT
// Test the write to most-significant field doesn't clobber least-significant.
cst.f4_a = CustomBitStruct(0b0110);
@@ -130,8 +122,7 @@ TEST(BitStructs, TwoCustom) {
}
TEST(BitStructs, Number) {
- BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn;
- ZeroInitialize(bsn);
+ BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn{}; // NOLINT
EXPECT_EQ(2u, sizeof(bsn));
bsn = 0b1111;
@@ -163,8 +154,7 @@ TEST(BitStructs, Test1) {
EXPECT_EQ(1u, sizeof(u4));
EXPECT_EQ(1u, sizeof(alias_all));
}
- TestBitStruct tst;
- ZeroInitialize(tst);
+ TestBitStruct tst{}; // NOLINT
// Check minimal size selection is correct.
EXPECT_EQ(1u, sizeof(TestBitStruct));
@@ -239,8 +229,7 @@ BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
TEST(BitStructs, Mixed) {
EXPECT_EQ(4u, sizeof(MixedSizeBitStruct));
- MixedSizeBitStruct tst;
- ZeroInitialize(tst);
+ MixedSizeBitStruct tst{}; // NOLINT
// Check operator assignment.
tst.u3 = 0b111u;
@@ -266,4 +255,68 @@ TEST(BitStructs, Mixed) {
EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
}
+BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size */ 8)
+ BitStructInt</*lsb*/0, /*width*/3> i3;
+ BitStructUint</*lsb*/3, /*width*/4> u4;
+
+ BitStructUint</*lsb*/0, /*width*/8> alias_all;
+BITSTRUCT_DEFINE_END(TestBitStruct_u8);
+
+TEST(BitStructs, FieldAssignment) {
+ TestBitStruct_u8 all_1s{}; // NOLINT
+ all_1s.alias_all = 0xffu;
+
+ {
+ TestBitStruct_u8 tst{}; // NOLINT
+ tst.i3 = all_1s.i3;
+
+ // Copying a single bitfield does not copy all bitfields.
+ EXPECT_EQ(0b111, tst.alias_all);
+ }
+
+ {
+ TestBitStruct_u8 tst{}; // NOLINT
+ tst.u4 = all_1s.u4;
+
+ // Copying a single bitfield does not copy all bitfields.
+ EXPECT_EQ(0b1111000, tst.alias_all);
+ }
+}
+
+BITSTRUCT_DEFINE_START(NestedStruct, /* size */ 64)
+ BitStructField<MixedSizeBitStruct, /*lsb*/0> mixed_lower;
+ BitStructField<MixedSizeBitStruct, /*lsb*/32> mixed_upper;
+
+ BitStructUint</*lsb*/0, /*width*/64> alias_all;
+BITSTRUCT_DEFINE_END(NestedStruct);
+
+TEST(BitStructs, NestedFieldAssignment) {
+ MixedSizeBitStruct mixed_all_1s{}; // NOLINT
+ mixed_all_1s.alias_all = 0xFFFFFFFFu;
+
+ {
+ NestedStruct xyz{}; // NOLINT
+
+ NestedStruct other{}; // NOLINT
+ other.mixed_upper = mixed_all_1s;
+ other.mixed_lower = mixed_all_1s;
+
+ // Copying a single bitfield does not copy all bitfields.
+ xyz.mixed_lower = other.mixed_lower;
+ EXPECT_EQ(0xFFFFFFFFu, xyz.alias_all);
+ }
+
+ {
+ NestedStruct xyz{}; // NOLINT
+
+ NestedStruct other{}; // NOLINT
+ other.mixed_upper = mixed_all_1s;
+ other.mixed_lower = mixed_all_1s;
+
+ // Copying a single bitfield does not copy all bitfields.
+ xyz.mixed_upper = other.mixed_upper;
+ EXPECT_EQ(0xFFFFFFFF00000000u, xyz.alias_all);
+ }
+}
+
} // namespace art
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index da3c7048b6..5d836545e9 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -388,7 +388,8 @@ inline static constexpr std::make_unsigned_t<T> MaskLeastSignificant(size_t bits
if (bits >= BitSizeOf<T>()) {
return std::numeric_limits<unsigned_T>::max();
} else {
- return static_cast<unsigned_T>((1 << bits) - 1);
+ auto kOne = static_cast<unsigned_T>(1); // Shift an unsigned_T value so it is not truncated when T is wider than int.
+ return static_cast<unsigned_T>((kOne << bits) - kOne);
}
}
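The reason for the change: the literal 1 has type int, so for a 64-bit T the old expression shifted a 32-bit value, and shifting an int by 32 or more bits is undefined. A minimal illustration (editorial, assuming a typical toolchain where int is 32 bits):

    #include <cstdint>

    // uint64_t bad = (1 << 40) - 1;                       // Undefined: shifts a 32-bit int by 40.
    uint64_t good = (static_cast<uint64_t>(1) << 40) - 1;  // 0xFFFFFFFFFF, as intended.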
diff --git a/runtime/base/bit_utils_test.cc b/runtime/base/bit_utils_test.cc
index 0276d8ded2..3a80600b57 100644
--- a/runtime/base/bit_utils_test.cc
+++ b/runtime/base/bit_utils_test.cc
@@ -350,6 +350,8 @@ static_assert(MaskLeastSignificant(1) == 0b1, "TestMaskLeastSignificant#2");
static_assert(MaskLeastSignificant(2) == 0b11, "TestMaskLeastSignificant#3");
static_assert(MaskLeastSignificant<uint8_t>(8) == 0xFF, "TestMaskLeastSignificant#4");
static_assert(MaskLeastSignificant<int8_t>(8) == 0xFF, "TestMaskLeastSignificant#5");
+static_assert(MaskLeastSignificant<uint64_t>(63) == (std::numeric_limits<uint64_t>::max() >> 1u),
+ "TestMaskLeastSignificant#6");
static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
diff --git a/runtime/base/bit_vector-inl.h b/runtime/base/bit_vector-inl.h
index 08877987b1..0e67f77e19 100644
--- a/runtime/base/bit_vector-inl.h
+++ b/runtime/base/bit_vector-inl.h
@@ -65,6 +65,24 @@ inline uint32_t BitVector::IndexIterator::FindIndex(uint32_t start_index) const
return word_index * 32u + CTZ(word);
}
+inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, begin_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(FindIndex(0u)) { }
+
+inline BitVector::IndexIterator::IndexIterator(const BitVector* bit_vector, end_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(BitSize()) { }
+
+inline BitVector::IndexIterator BitVector::IndexContainer::begin() const {
+ return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+}
+
+inline BitVector::IndexIterator BitVector::IndexContainer::end() const {
+ return IndexIterator(bit_vector_, IndexIterator::end_tag());
+}
+
inline void BitVector::ClearAllBits() {
memset(storage_, 0, storage_size_ * kWordBytes);
}
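Moving these definitions into the -inl.h header does not change how the iterator is used; a typical traversal still looks like this (an editorial sketch that assumes the usual BitVector::Indexes() accessor and the malloc-backed allocator):

    BitVector bv(/* start_bits */ 64, /* expandable */ false, Allocator::GetMallocAllocator());
    bv.SetBit(3);
    bv.SetBit(40);
    for (uint32_t idx : bv.Indexes()) {  // Uses the IndexContainer::begin()/end() defined above.
      // Visits 3, then 40.
    }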
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 56090672ce..564092a1a2 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -70,15 +70,8 @@ class BitVector {
struct begin_tag { };
struct end_tag { };
- IndexIterator(const BitVector* bit_vector, begin_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(FindIndex(0u)) { }
-
- IndexIterator(const BitVector* bit_vector, end_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(BitSize()) { }
+ IndexIterator(const BitVector* bit_vector, begin_tag);
+ IndexIterator(const BitVector* bit_vector, end_tag);
uint32_t BitSize() const {
return storage_size_ * kWordBits;
@@ -99,13 +92,8 @@ class BitVector {
public:
explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
- IndexIterator begin() const {
- return IndexIterator(bit_vector_, IndexIterator::begin_tag());
- }
-
- IndexIterator end() const {
- return IndexIterator(bit_vector_, IndexIterator::end_tag());
- }
+ IndexIterator begin() const;
+ IndexIterator end() const;
private:
const BitVector* const bit_vector_;
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index e19aecb712..886065db30 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -94,11 +94,19 @@ class DebugStackReferenceImpl {
DebugStackReferenceImpl(const DebugStackReferenceImpl& other)
: counter_(other.counter_), ref_count_(counter_->IncrementRefCount()) {
}
+ DebugStackReferenceImpl(DebugStackReferenceImpl&& other)
+ : counter_(other.counter_), ref_count_(other.ref_count_) {
+ other.counter_ = nullptr;
+ }
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) {
CHECK(counter_ == other.counter_);
return *this;
}
- ~DebugStackReferenceImpl() { counter_->DecrementRefCount(); }
+ ~DebugStackReferenceImpl() {
+ if (counter_ != nullptr) {
+ counter_->DecrementRefCount();
+ }
+ }
void CheckTop() { CHECK_EQ(counter_->GetRefCount(), ref_count_); }
private:
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 189c0d0030..4b56d3b30c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -664,7 +664,7 @@ class Locks {
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
- #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 973f9b93ed..7240842d55 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -48,8 +48,7 @@ void ArenaStack::Reset() {
MemStats ArenaStack::GetPeakStats() const {
DebugStackRefCounter::CheckNoRefs();
- return MemStats("ArenaStack peak", static_cast<const TaggedStats<Peak>*>(&stats_and_pool_),
- bottom_arena_);
+ return MemStats("ArenaStack peak", PeakStats(), bottom_arena_);
}
uint8_t* ArenaStack::AllocateFromNextArena(size_t rounded_bytes) {
@@ -107,18 +106,32 @@ void* ArenaStack::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
return ptr;
}
+ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other)
+ : DebugStackReference(std::move(other)),
+ DebugStackRefCounter(),
+ ArenaAllocatorStats(other),
+ arena_stack_(other.arena_stack_),
+ mark_arena_(other.mark_arena_),
+ mark_ptr_(other.mark_ptr_),
+ mark_end_(other.mark_end_) {
+ other.DebugStackRefCounter::CheckNoRefs();
+ other.arena_stack_ = nullptr;
+}
+
ScopedArenaAllocator::ScopedArenaAllocator(ArenaStack* arena_stack)
- : DebugStackReference(arena_stack),
- DebugStackRefCounter(),
- ArenaAllocatorStats(*arena_stack->CurrentStats()),
- arena_stack_(arena_stack),
- mark_arena_(arena_stack->top_arena_),
- mark_ptr_(arena_stack->top_ptr_),
- mark_end_(arena_stack->top_end_) {
+ : DebugStackReference(arena_stack),
+ DebugStackRefCounter(),
+ ArenaAllocatorStats(*arena_stack->CurrentStats()),
+ arena_stack_(arena_stack),
+ mark_arena_(arena_stack->top_arena_),
+ mark_ptr_(arena_stack->top_ptr_),
+ mark_end_(arena_stack->top_end_) {
}
ScopedArenaAllocator::~ScopedArenaAllocator() {
- DoReset();
+ if (arena_stack_ != nullptr) {
+ DoReset();
+ }
}
void ScopedArenaAllocator::Reset() {
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index f156f526fc..8f50fd443b 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -54,6 +54,7 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
void Reset();
size_t PeakBytesAllocated() {
+ DebugStackRefCounter::CheckNoRefs();
return PeakStats()->BytesAllocated();
}
@@ -81,6 +82,10 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
return static_cast<TaggedStats<Peak>*>(&stats_and_pool_);
}
+ const ArenaAllocatorStats* PeakStats() const {
+ return static_cast<const TaggedStats<Peak>*>(&stats_and_pool_);
+ }
+
ArenaAllocatorStats* CurrentStats() {
return static_cast<TaggedStats<Current>*>(&stats_and_pool_);
}
@@ -132,16 +137,7 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
class ScopedArenaAllocator
: private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
public:
- // Create a ScopedArenaAllocator directly on the ArenaStack when the scope of
- // the allocator is not exactly a C++ block scope. For example, an optimization
- // pass can create the scoped allocator in Start() and destroy it in End().
- static ScopedArenaAllocator* Create(ArenaStack* arena_stack) {
- void* addr = arena_stack->Alloc(sizeof(ScopedArenaAllocator), kArenaAllocMisc);
- ScopedArenaAllocator* allocator = new(addr) ScopedArenaAllocator(arena_stack);
- allocator->mark_ptr_ = reinterpret_cast<uint8_t*>(addr);
- return allocator;
- }
-
+ ScopedArenaAllocator(ScopedArenaAllocator&& other);
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
@@ -173,7 +169,7 @@ class ScopedArenaAllocator
static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
- ArenaStack* const arena_stack_;
+ ArenaStack* arena_stack_;
Arena* mark_arena_;
uint8_t* mark_ptr_;
uint8_t* mark_end_;
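With the move constructor in place, the removed Create()-on-the-arena idiom can be replaced by passing or returning the allocator by value, for example (an editorial sketch, not code from this patch):

    // A factory can now hand the allocator out by value; the move constructor (or copy
    // elision) transfers ownership of the arena mark, and the moved-from object's
    // destructor becomes a no-op because its arena_stack_ is null.
    ScopedArenaAllocator MakeScopedAllocator(ArenaStack* arena_stack) {
      return ScopedArenaAllocator(arena_stack);
    }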
diff --git a/runtime/cdex/compact_dex_file.cc b/runtime/cdex/compact_dex_file.cc
new file mode 100644
index 0000000000..82ffdb0adb
--- /dev/null
+++ b/runtime/cdex/compact_dex_file.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compact_dex_file.h"
+
+namespace art {
+
+constexpr uint8_t CompactDexFile::kDexMagic[kDexMagicSize];
+constexpr uint8_t CompactDexFile::kDexMagicVersion[];
+
+void CompactDexFile::WriteMagic(uint8_t* magic) {
+ std::copy_n(kDexMagic, kDexMagicSize, magic);
+}
+
+void CompactDexFile::WriteCurrentVersion(uint8_t* magic) {
+ std::copy_n(kDexMagicVersion, kDexVersionLen, magic + kDexMagicSize);
+}
+
+bool CompactDexFile::IsMagicValid(const uint8_t* magic) {
+ return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
+}
+
+bool CompactDexFile::IsVersionValid(const uint8_t* magic) {
+ const uint8_t* version = &magic[sizeof(kDexMagic)];
+ return memcmp(version, kDexMagicVersion, kDexVersionLen) == 0;
+}
+
+bool CompactDexFile::IsMagicValid() const {
+ return IsMagicValid(header_->magic_);
+}
+
+bool CompactDexFile::IsVersionValid() const {
+ return IsVersionValid(header_->magic_);
+}
+
+} // namespace art
diff --git a/runtime/cdex/compact_dex_file.h b/runtime/cdex/compact_dex_file.h
new file mode 100644
index 0000000000..3c1b638f03
--- /dev/null
+++ b/runtime/cdex/compact_dex_file.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CDEX_COMPACT_DEX_FILE_H_
+#define ART_RUNTIME_CDEX_COMPACT_DEX_FILE_H_
+
+#include "dex_file.h"
+
+namespace art {
+
+// CompactDex is currently an ART-internal dex file format that aims to reduce storage/RAM usage.
+class CompactDexFile : public DexFile {
+ public:
+ static constexpr uint8_t kDexMagic[kDexMagicSize] = { 'c', 'd', 'e', 'x' };
+ static constexpr uint8_t kDexMagicVersion[] = {'0', '0', '1', '\0'};
+
+ // Write the compact dex specific magic.
+ static void WriteMagic(uint8_t* magic);
+
+ // Write the current version; note that the input is the address of the magic.
+ static void WriteCurrentVersion(uint8_t* magic);
+
+ // Returns true if the byte string points to the magic value.
+ static bool IsMagicValid(const uint8_t* magic);
+ virtual bool IsMagicValid() const OVERRIDE;
+
+ // Returns true if the byte string after the magic is the correct value.
+ static bool IsVersionValid(const uint8_t* magic);
+ virtual bool IsVersionValid() const OVERRIDE;
+
+ bool IsCompactDexFile() const OVERRIDE {
+ return true;
+ }
+
+ private:
+ // Not supported yet.
+ CompactDexFile(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
+
+ friend class DexFile;
+ friend class DexFileLoader;
+
+ DISALLOW_COPY_AND_ASSIGN(CompactDexFile);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CDEX_COMPACT_DEX_FILE_H_
diff --git a/runtime/cdex/compact_dex_file_test.cc b/runtime/cdex/compact_dex_file_test.cc
new file mode 100644
index 0000000000..b43b35d69a
--- /dev/null
+++ b/runtime/cdex/compact_dex_file_test.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cdex/compact_dex_file.h"
+#include "dex_file_loader.h"
+#include "common_runtime_test.h"
+
+namespace art {
+
+class CompactDexFileTest : public CommonRuntimeTest {};
+
+TEST_F(CompactDexFileTest, MagicAndVersion) {
+ // Test permutations of valid/invalid headers.
+ for (size_t i = 0; i < 2; ++i) {
+ for (size_t j = 0; j < 2; ++j) {
+ static const size_t len = CompactDexFile::kDexVersionLen + CompactDexFile::kDexMagicSize;
+ uint8_t header[len] = {};
+ std::fill_n(header, len, 0x99);
+ const bool valid_magic = (i & 1) == 0;
+ const bool valid_version = (j & 1) == 0;
+ if (valid_magic) {
+ CompactDexFile::WriteMagic(header);
+ }
+ if (valid_version) {
+ CompactDexFile::WriteCurrentVersion(header);
+ }
+ EXPECT_EQ(valid_magic, CompactDexFile::IsMagicValid(header));
+ EXPECT_EQ(valid_version, CompactDexFile::IsVersionValid(header));
+ EXPECT_EQ(valid_magic, DexFileLoader::IsMagicValid(header));
+ EXPECT_EQ(valid_magic && valid_version, DexFileLoader::IsVersionAndMagicValid(header));
+ }
+ }
+}
+
+} // namespace art
diff --git a/runtime/cdex/compact_dex_level.h b/runtime/cdex/compact_dex_level.h
new file mode 100644
index 0000000000..b824462bf0
--- /dev/null
+++ b/runtime/cdex/compact_dex_level.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
+#define ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
+
+#include "dex_file.h"
+
+namespace art {
+
+// Optimization level for compact dex generation.
+enum class CompactDexLevel {
+ // Level none means not generated.
+ kCompactDexLevelNone,
+ // Level fast means optimizations that don't take many resources to perform.
+ kCompactDexLevelFast,
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CDEX_COMPACT_DEX_LEVEL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fe91272ef7..b199933ae4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -99,7 +99,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
#include "native/dalvik_system_DexFile.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "oat.h"
#include "oat_file-inl.h"
#include "oat_file.h"
@@ -1819,6 +1819,7 @@ bool ClassLinker::AddImageSpace(
if (kIsDebugBuild && app_image) {
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
+ ScopedTrace trace("VerifyAppImage");
VerifyAppImage(header, class_loader, dex_caches, class_table, space);
}
@@ -7707,12 +7708,6 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == nullptr) {
- // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here
- // as a workaround for FastNative JNI to avoid AssertNoPendingException() when
- // trying to resolve annotations while an exception may be pending. Bug: 34659969
- resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get());
- }
- if (resolved == nullptr) {
Thread* self = Thread::Current();
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(self, descriptor, class_loader);
@@ -7981,7 +7976,8 @@ ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
return resolved;
}
-mirror::MethodType* ClassLinker::ResolveMethodType(const DexFile& dex_file,
+mirror::MethodType* ClassLinker::ResolveMethodType(Thread* self,
+ const DexFile& dex_file,
uint32_t proto_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader) {
@@ -7993,7 +7989,6 @@ mirror::MethodType* ClassLinker::ResolveMethodType(const DexFile& dex_file,
return resolved.Ptr();
}
- Thread* const self = Thread::Current();
StackHandleScope<4> hs(self);
// First resolve the return type.
@@ -8043,13 +8038,14 @@ mirror::MethodType* ClassLinker::ResolveMethodType(const DexFile& dex_file,
return type.Get();
}
-mirror::MethodType* ClassLinker::ResolveMethodType(uint32_t proto_idx, ArtMethod* referrer) {
- Thread* const self = Thread::Current();
+mirror::MethodType* ClassLinker::ResolveMethodType(Thread* self,
+ uint32_t proto_idx,
+ ArtMethod* referrer) {
StackHandleScope<2> hs(self);
const DexFile* dex_file = referrer->GetDexFile();
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
- return ResolveMethodType(*dex_file, proto_idx, dex_cache, class_loader);
+ return ResolveMethodType(self, *dex_file, proto_idx, dex_cache, class_loader);
}
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
@@ -8344,10 +8340,10 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
return mirror::MethodHandleImpl::Create(self, target, kind, method_type);
}
-mirror::MethodHandle* ClassLinker::ResolveMethodHandle(uint32_t method_handle_idx,
+mirror::MethodHandle* ClassLinker::ResolveMethodHandle(Thread* self,
+ uint32_t method_handle_idx,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
- Thread* const self = Thread::Current();
const DexFile* const dex_file = referrer->GetDexFile();
const DexFile::MethodHandleItem& method_handle = dex_file->GetMethodHandle(method_handle_idx);
switch (static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_)) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index e436b99c4d..eba202228c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -364,20 +364,23 @@ class ClassLinker {
// Resolve a method type with a given ID from the DexFile, storing
// the result in the DexCache.
- mirror::MethodType* ResolveMethodType(const DexFile& dex_file,
+ mirror::MethodType* ResolveMethodType(Thread* self,
+ const DexFile& dex_file,
uint32_t proto_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- mirror::MethodType* ResolveMethodType(uint32_t proto_idx, ArtMethod* referrer)
+ mirror::MethodType* ResolveMethodType(Thread* self, uint32_t proto_idx, ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a method handle with a given ID from the DexFile. The
// result is not cached in the DexCache as the instance will only be
// used once in most circumstances.
- mirror::MethodHandle* ResolveMethodHandle(uint32_t method_handle_idx, ArtMethod* referrer)
+ mirror::MethodHandle* ResolveMethodHandle(Thread* self,
+ uint32_t method_handle_idx,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 6ea1fbe195..3e92317682 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1466,6 +1466,7 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
old_dex_file->Size(),
location->ToModifiedUtf8(),
0u,
+ nullptr,
nullptr));
{
WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
@@ -1476,7 +1477,6 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
TEST_F(ClassLinkerMethodHandlesTest, TestResolveMethodTypes) {
ScopedObjectAccess soa(Thread::Current());
-
StackHandleScope<7> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -1494,7 +1494,7 @@ TEST_F(ClassLinkerMethodHandlesTest, TestResolveMethodTypes) {
const DexFile& dex_file = *(method1->GetDexFile());
Handle<mirror::DexCache> dex_cache = hs.NewHandle(
- class_linker_->FindDexCache(Thread::Current(), dex_file));
+ class_linker_->FindDexCache(soa.Self(), dex_file));
const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
@@ -1503,20 +1503,29 @@ TEST_F(ClassLinkerMethodHandlesTest, TestResolveMethodTypes) {
// Its RType = Ljava/lang/String;
// Its PTypes = { Ljava/lang/String; }
Handle<mirror::MethodType> method1_type = hs.NewHandle(
- class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+ class_linker_->ResolveMethodType(soa.Self(),
+ dex_file,
+ method1_id.proto_idx_,
+ dex_cache,
+ class_loader));
// Assert that the method type was resolved successfully.
ASSERT_TRUE(method1_type != nullptr);
// Assert that the return type and the method arguments are as we expect.
- Handle<mirror::Class> string_class(
- hs.NewHandle(class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader)));
+ Handle<mirror::Class> string_class(hs.NewHandle(class_linker_->FindClass(soa.Self(),
+ "Ljava/lang/String;",
+ class_loader)));
ASSERT_EQ(string_class.Get(), method1_type->GetRType());
ASSERT_EQ(string_class.Get(), method1_type->GetPTypes()->Get(0));
// Resolve the method type again and assert that we get back the same value.
Handle<mirror::MethodType> method1_type2 = hs.NewHandle(
- class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+ class_linker_->ResolveMethodType(soa.Self(),
+ dex_file,
+ method1_id.proto_idx_,
+ dex_cache,
+ class_loader));
ASSERT_EQ(method1_type.Get(), method1_type2.Get());
// Resolve the MethodType associated with a different method signature
@@ -1529,8 +1538,11 @@ TEST_F(ClassLinkerMethodHandlesTest, TestResolveMethodTypes) {
ASSERT_FALSE(method2->IsDirect());
const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
Handle<mirror::MethodType> method2_type = hs.NewHandle(
- class_linker_->ResolveMethodType(dex_file, method2_id.proto_idx_, dex_cache, class_loader));
-
+ class_linker_->ResolveMethodType(soa.Self(),
+ dex_file,
+ method2_id.proto_idx_,
+ dex_cache,
+ class_loader));
ASSERT_TRUE(method1_type.Get() != method2_type.Get());
}
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 167533d68a..38f59efdf7 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -230,6 +230,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// contents. So pass true to verify_checksum.
if (!DexFileLoader::Open(location.c_str(),
location.c_str(),
+ Runtime::Current()->IsVerificationEnabled(),
/*verify_checksum*/ true,
&error_msg,
&info.opened_dex_files)) {
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index fcc5393490..6a78637ab1 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -17,10 +17,20 @@
#ifndef ART_RUNTIME_COMMON_DEX_OPERATIONS_H_
#define ART_RUNTIME_COMMON_DEX_OPERATIONS_H_
+#include "android-base/logging.h"
#include "art_field.h"
#include "art_method.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "class_linker.h"
+#include "handle_scope-inl.h"
+#include "instrumentation.h"
+#include "interpreter/shadow_frame.h"
#include "interpreter/unstarted_runtime.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "obj_ptr-inl.h"
+#include "primitive.h"
#include "runtime.h"
#include "stack.h"
#include "thread.h"
@@ -61,6 +71,18 @@ inline void PerformCall(Thread* self,
}
}
+template <typename T>
+inline void DCheckStaticState(Thread* self, T* entity) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ ObjPtr<mirror::Class> klass = entity->GetDeclaringClass();
+ if (entity->IsStatic()) {
+ klass->AssertInitializedOrInitializingInThread(self);
+ } else {
+ CHECK(klass->IsInitializing() || klass->IsErroneousResolved());
+ }
+ }
+}
+
template<Primitive::Type field_type>
static ALWAYS_INLINE bool DoFieldGetCommon(Thread* self,
const ShadowFrame& shadow_frame,
@@ -68,7 +90,7 @@ static ALWAYS_INLINE bool DoFieldGetCommon(Thread* self,
ArtField* field,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ DCheckStaticState(self, field);
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -126,7 +148,7 @@ ALWAYS_INLINE bool DoFieldPutCommon(Thread* self,
ArtField* field,
JValue& value)
REQUIRES_SHARED(Locks::mutator_lock_) {
- field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ DCheckStaticState(self, field);
// Report this field access to instrumentation if needed. Since we only have the offset of
// the field from the base of the object, we need to look for it first.
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 0c2e49010e..149c33fa87 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -21,7 +21,7 @@
#include <fcntl.h>
#include <stdlib.h>
#include <cstdio>
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
#include "android-base/stringprintf.h"
@@ -373,7 +373,8 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile(
std::string error_msg;
MemMap::Init();
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
@@ -572,8 +573,11 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFi
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFileLoader::Open(
- filename.c_str(), filename.c_str(), kVerifyChecksum, &error_msg, &dex_files);
+ bool success = DexFileLoader::Open(filename.c_str(),
+ filename.c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index bf2e7062ad..1e77753e87 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -31,7 +31,7 @@
#include "mirror/method_type.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "thread.h"
#include "verifier/method_verifier.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b021ff1734..8898afe116 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -55,8 +55,8 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "safe_map.h"
@@ -346,6 +346,10 @@ bool DebuggerActiveMethodInspectionCallback::IsMethodBeingInspected(ArtMethod* m
return Dbg::IsDebuggerActive();
}
+bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
+ return !Dbg::MethodHasAnyBreakpoints(m);
+}
+
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 18126b1eed..ec37833f6d 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -55,6 +55,7 @@ class Thread;
struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
bool IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMethodSafeToJit(ArtMethod* m) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index a9bb95480e..57cef3de47 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -84,6 +84,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(),
GetMultiDexSrc1().c_str(),
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&multi1)) << error_msg;
@@ -92,6 +93,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(),
GetMultiDexSrc2().c_str(),
+ /* verify */ true,
kVerifyChecksum,
&error_msg,
&multi2)) << error_msg;
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 1b7c31859c..5dfbd9b6a1 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_DEX_FILE_INL_H_
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/logging.h"
#include "base/stringpiece.h"
#include "dex_file.h"
@@ -220,6 +221,280 @@ InvokeType ClassDataItemIterator::GetMethodInvokeType(const DexFile::ClassDef& c
}
}
+template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
+bool DexFile::DecodeDebugLocalInfo(const uint8_t* stream,
+ const std::string& location,
+ const char* declaring_class_descriptor,
+ const std::vector<const char*>& arg_descriptors,
+ const std::string& method_name,
+ bool is_static,
+ uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t insns_size_in_code_units,
+ IndexToStringData index_to_string_data,
+ TypeIndexToStringData type_index_to_string_data,
+ NewLocalCallback new_local_callback,
+ void* context) {
+ if (stream == nullptr) {
+ return false;
+ }
+ std::vector<LocalInfo> local_in_reg(registers_size);
+
+ uint16_t arg_reg = registers_size - ins_size;
+ if (!is_static) {
+ const char* descriptor = declaring_class_descriptor;
+ local_in_reg[arg_reg].name_ = "this";
+ local_in_reg[arg_reg].descriptor_ = descriptor;
+ local_in_reg[arg_reg].signature_ = nullptr;
+ local_in_reg[arg_reg].start_address_ = 0;
+ local_in_reg[arg_reg].reg_ = arg_reg;
+ local_in_reg[arg_reg].is_live_ = true;
+ arg_reg++;
+ }
+
+ DecodeUnsignedLeb128(&stream); // Line.
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ uint32_t i;
+ if (parameters_size != arg_descriptors.size()) {
+ LOG(ERROR) << "invalid stream - problem with parameter iterator in " << location
+ << " for method " << method_name;
+ return false;
+ }
+ for (i = 0; i < parameters_size && i < arg_descriptors.size(); ++i) {
+ if (arg_reg >= registers_size) {
+ LOG(ERROR) << "invalid stream - arg reg >= reg size (" << arg_reg
+ << " >= " << registers_size << ") in " << location;
+ return false;
+ }
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
+ const char* descriptor = arg_descriptors[i];
+ local_in_reg[arg_reg].name_ = index_to_string_data(name_idx);
+ local_in_reg[arg_reg].descriptor_ = descriptor;
+ local_in_reg[arg_reg].signature_ = nullptr;
+ local_in_reg[arg_reg].start_address_ = 0;
+ local_in_reg[arg_reg].reg_ = arg_reg;
+ local_in_reg[arg_reg].is_live_ = true;
+ switch (*descriptor) {
+ case 'D':
+ case 'J':
+ arg_reg += 2;
+ break;
+ default:
+ arg_reg += 1;
+ break;
+ }
+ }
+
+ uint32_t address = 0;
+ for (;;) {
+ uint8_t opcode = *stream++;
+ switch (opcode) {
+ case DBG_END_SEQUENCE:
+ // Emit all variables which are still alive at the end of the method.
+ for (uint16_t reg = 0; reg < registers_size; reg++) {
+ if (local_in_reg[reg].is_live_) {
+ local_in_reg[reg].end_address_ = insns_size_in_code_units;
+ new_local_callback(context, local_in_reg[reg]);
+ }
+ }
+ return true;
+ case DBG_ADVANCE_PC:
+ address += DecodeUnsignedLeb128(&stream);
+ break;
+ case DBG_ADVANCE_LINE:
+ DecodeSignedLeb128(&stream); // Line.
+ break;
+ case DBG_START_LOCAL:
+ case DBG_START_LOCAL_EXTENDED: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= registers_size) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
+ << registers_size << ") in " << location;
+ return false;
+ }
+
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
+ uint16_t descriptor_idx = DecodeUnsignedLeb128P1(&stream);
+ uint32_t signature_idx = dex::kDexNoIndex;
+ if (opcode == DBG_START_LOCAL_EXTENDED) {
+ signature_idx = DecodeUnsignedLeb128P1(&stream);
+ }
+
+ // Emit what was previously there, if anything
+ if (local_in_reg[reg].is_live_) {
+ local_in_reg[reg].end_address_ = address;
+ new_local_callback(context, local_in_reg[reg]);
+ }
+
+ local_in_reg[reg].name_ = index_to_string_data(name_idx);
+ local_in_reg[reg].descriptor_ = type_index_to_string_data(descriptor_idx);
+ local_in_reg[reg].signature_ = index_to_string_data(signature_idx);
+ local_in_reg[reg].start_address_ = address;
+ local_in_reg[reg].reg_ = reg;
+ local_in_reg[reg].is_live_ = true;
+ break;
+ }
+ case DBG_END_LOCAL: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= registers_size) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
+ << registers_size << ") in " << location;
+ return false;
+ }
+ // If the register is live, close it properly. Otherwise, closing an already
+ // closed register is sloppy, but harmless if no further action is taken.
+ if (local_in_reg[reg].is_live_) {
+ local_in_reg[reg].end_address_ = address;
+ new_local_callback(context, local_in_reg[reg]);
+ local_in_reg[reg].is_live_ = false;
+ }
+ break;
+ }
+ case DBG_RESTART_LOCAL: {
+ uint16_t reg = DecodeUnsignedLeb128(&stream);
+ if (reg >= registers_size) {
+ LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
+ << registers_size << ") in " << location;
+ return false;
+ }
+ // If the register is live, the "restart" is superfluous,
+ // and we don't want to mess with the existing start address.
+ if (!local_in_reg[reg].is_live_) {
+ local_in_reg[reg].start_address_ = address;
+ local_in_reg[reg].is_live_ = true;
+ }
+ break;
+ }
+ case DBG_SET_PROLOGUE_END:
+ case DBG_SET_EPILOGUE_BEGIN:
+ break;
+ case DBG_SET_FILE:
+ DecodeUnsignedLeb128P1(&stream); // name.
+ break;
+ default:
+ address += (opcode - DBG_FIRST_SPECIAL) / DBG_LINE_RANGE;
+ break;
+ }
+ }
+}
+
+template<typename NewLocalCallback>
+bool DexFile::DecodeDebugLocalInfo(const CodeItem* code_item,
+ bool is_static,
+ uint32_t method_idx,
+ NewLocalCallback new_local_callback,
+ void* context) const {
+ if (code_item == nullptr) {
+ return false;
+ }
+ std::vector<const char*> arg_descriptors;
+ DexFileParameterIterator it(*this, GetMethodPrototype(GetMethodId(method_idx)));
+ for (; it.HasNext(); it.Next()) {
+ arg_descriptors.push_back(it.GetDescriptor());
+ }
+ return DecodeDebugLocalInfo(GetDebugInfoStream(code_item),
+ GetLocation(),
+ GetMethodDeclaringClassDescriptor(GetMethodId(method_idx)),
+ arg_descriptors,
+ this->PrettyMethod(method_idx),
+ is_static,
+ code_item->registers_size_,
+ code_item->ins_size_,
+ code_item->insns_size_in_code_units_,
+ [this](uint32_t idx) {
+ return StringDataByIdx(dex::StringIndex(idx));
+ },
+ [this](uint32_t idx) {
+ return StringByTypeIdx(dex::TypeIndex(
+ dchecked_integral_cast<uint16_t>(idx)));
+ },
+ new_local_callback,
+ context);
+}
+
+template<typename DexDebugNewPosition, typename IndexToStringData>
+bool DexFile::DecodeDebugPositionInfo(const uint8_t* stream,
+ IndexToStringData index_to_string_data,
+ DexDebugNewPosition position_functor,
+ void* context) {
+ if (stream == nullptr) {
+ return false;
+ }
+
+ PositionInfo entry = PositionInfo();
+ entry.line_ = DecodeUnsignedLeb128(&stream);
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ for (uint32_t i = 0; i < parameters_size; ++i) {
+ DecodeUnsignedLeb128P1(&stream); // Parameter name.
+ }
+
+ for (;;) {
+ uint8_t opcode = *stream++;
+ switch (opcode) {
+ case DBG_END_SEQUENCE:
+ return true; // end of stream.
+ case DBG_ADVANCE_PC:
+ entry.address_ += DecodeUnsignedLeb128(&stream);
+ break;
+ case DBG_ADVANCE_LINE:
+ entry.line_ += DecodeSignedLeb128(&stream);
+ break;
+ case DBG_START_LOCAL:
+ DecodeUnsignedLeb128(&stream); // reg.
+ DecodeUnsignedLeb128P1(&stream); // name.
+ DecodeUnsignedLeb128P1(&stream); // descriptor.
+ break;
+ case DBG_START_LOCAL_EXTENDED:
+ DecodeUnsignedLeb128(&stream); // reg.
+ DecodeUnsignedLeb128P1(&stream); // name.
+ DecodeUnsignedLeb128P1(&stream); // descriptor.
+ DecodeUnsignedLeb128P1(&stream); // signature.
+ break;
+ case DBG_END_LOCAL:
+ case DBG_RESTART_LOCAL:
+ DecodeUnsignedLeb128(&stream); // reg.
+ break;
+ case DBG_SET_PROLOGUE_END:
+ entry.prologue_end_ = true;
+ break;
+ case DBG_SET_EPILOGUE_BEGIN:
+ entry.epilogue_begin_ = true;
+ break;
+ case DBG_SET_FILE: {
+ uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
+ entry.source_file_ = index_to_string_data(name_idx);
+ break;
+ }
+ default: {
+ int adjopcode = opcode - DBG_FIRST_SPECIAL;
+ entry.address_ += adjopcode / DBG_LINE_RANGE;
+ entry.line_ += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
+ if (position_functor(context, entry)) {
+ return true; // early exit.
+ }
+ entry.prologue_end_ = false;
+ entry.epilogue_begin_ = false;
+ break;
+ }
+ }
+ }
+}
+
+template<typename DexDebugNewPosition>
+bool DexFile::DecodeDebugPositionInfo(const CodeItem* code_item,
+ DexDebugNewPosition position_functor,
+ void* context) const {
+ if (code_item == nullptr) {
+ return false;
+ }
+ return DecodeDebugPositionInfo(GetDebugInfoStream(code_item),
+ [this](uint32_t idx) {
+ return StringDataByIdx(dex::StringIndex(idx));
+ },
+ position_functor,
+ context);
+}
+
} // namespace art
#endif // ART_RUNTIME_DEX_FILE_INL_H_
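
A minimal caller sketch (not part of the patch) of the new template-based DecodeDebugPositionInfo introduced above: the position functor receives the caller-supplied context and a PositionInfo, and returning true stops decoding early. The helper name, the collector type, and the assumption of a valid DexFile and CodeItem are all hypothetical; only the callback shape and member names come from the code above.

    #include <vector>
    #include "dex_file-inl.h"  // Brings in the templated DecodeDebugPositionInfo defined above.

    namespace art {

    // Hypothetical collector for every line number reported by the debug position stream.
    struct LineCollector {
      std::vector<uint32_t> lines;
    };

    static bool CollectLine(void* raw_context, const DexFile::PositionInfo& entry) {
      static_cast<LineCollector*>(raw_context)->lines.push_back(entry.line_);
      return false;  // Keep decoding until DBG_END_SEQUENCE.
    }

    // Hypothetical helper: dex_file and code_item are assumed valid and owned by the caller.
    std::vector<uint32_t> CollectLineNumbers(const DexFile& dex_file,
                                             const DexFile::CodeItem* code_item) {
      LineCollector collector;
      dex_file.DecodeDebugPositionInfo(code_item, CollectLine, &collector);
      return collector.lines;
    }

    }  // namespace art
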
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 08c047d8e9..974c7acbb2 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -16,13 +16,10 @@
#include "dex_file.h"
-#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/file.h>
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <zlib.h>
#include <memory>
@@ -35,11 +32,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file-inl.h"
-#include "dex_file_loader.h"
-#include "jvalue.h"
#include "leb128.h"
-#include "mem_map.h"
-#include "os.h"
#include "standard_dex_file.h"
#include "utf-inl.h"
#include "utils.h"
@@ -59,46 +52,32 @@ uint32_t DexFile::CalculateChecksum() const {
return adler32(adler32(0L, Z_NULL, 0), non_sum_ptr, Size() - non_sum);
}
-struct DexFile::AnnotationValue {
- JValue value_;
- uint8_t type_;
-};
-
int DexFile::GetPermissions() const {
- if (mem_map_.get() == nullptr) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
+ CHECK(container_.get() != nullptr);
+ return container_->GetPermissions();
}
bool DexFile::IsReadOnly() const {
- return GetPermissions() == PROT_READ;
+ CHECK(container_.get() != nullptr);
+ return container_->IsReadOnly();
}
bool DexFile::EnableWrite() const {
- CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
+ CHECK(container_.get() != nullptr);
+ return container_->EnableWrite();
}
bool DexFile::DisableWrite() const {
- CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ);
- }
+ CHECK(container_.get() != nullptr);
+ return container_->DisableWrite();
}
DexFile::DexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file)
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
: begin_(base),
size_(size),
location_(location),
@@ -114,7 +93,8 @@ DexFile::DexFile(const uint8_t* base,
num_method_handles_(0),
call_site_ids_(nullptr),
num_call_site_ids_(0),
- oat_dex_file_(oat_dex_file) {
+ oat_dex_file_(oat_dex_file),
+ container_(container) {
CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
// Check base (=header) alignment.
@@ -537,228 +517,6 @@ int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, uint32_t addr
}
}
-bool DexFile::DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewLocalCb local_cb, void* context) const {
- DCHECK(local_cb != nullptr);
- if (code_item == nullptr) {
- return false;
- }
- const uint8_t* stream = GetDebugInfoStream(code_item);
- if (stream == nullptr) {
- return false;
- }
- std::vector<LocalInfo> local_in_reg(code_item->registers_size_);
-
- uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
- if (!is_static) {
- const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
- local_in_reg[arg_reg].name_ = "this";
- local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = nullptr;
- local_in_reg[arg_reg].start_address_ = 0;
- local_in_reg[arg_reg].reg_ = arg_reg;
- local_in_reg[arg_reg].is_live_ = true;
- arg_reg++;
- }
-
- DexFileParameterIterator it(*this, GetMethodPrototype(GetMethodId(method_idx)));
- DecodeUnsignedLeb128(&stream); // Line.
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- uint32_t i;
- for (i = 0; i < parameters_size && it.HasNext(); ++i, it.Next()) {
- if (arg_reg >= code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - arg reg >= reg size (" << arg_reg
- << " >= " << code_item->registers_size_ << ") in " << GetLocation();
- return false;
- }
- uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
- const char* descriptor = it.GetDescriptor();
- local_in_reg[arg_reg].name_ = StringDataByIdx(dex::StringIndex(name_idx));
- local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = nullptr;
- local_in_reg[arg_reg].start_address_ = 0;
- local_in_reg[arg_reg].reg_ = arg_reg;
- local_in_reg[arg_reg].is_live_ = true;
- switch (*descriptor) {
- case 'D':
- case 'J':
- arg_reg += 2;
- break;
- default:
- arg_reg += 1;
- break;
- }
- }
- if (i != parameters_size || it.HasNext()) {
- LOG(ERROR) << "invalid stream - problem with parameter iterator in " << GetLocation()
- << " for method " << this->PrettyMethod(method_idx);
- return false;
- }
-
- uint32_t address = 0;
- for (;;) {
- uint8_t opcode = *stream++;
- switch (opcode) {
- case DBG_END_SEQUENCE:
- // Emit all variables which are still alive at the end of the method.
- for (uint16_t reg = 0; reg < code_item->registers_size_; reg++) {
- if (local_in_reg[reg].is_live_) {
- local_in_reg[reg].end_address_ = code_item->insns_size_in_code_units_;
- local_cb(context, local_in_reg[reg]);
- }
- }
- return true;
- case DBG_ADVANCE_PC:
- address += DecodeUnsignedLeb128(&stream);
- break;
- case DBG_ADVANCE_LINE:
- DecodeSignedLeb128(&stream); // Line.
- break;
- case DBG_START_LOCAL:
- case DBG_START_LOCAL_EXTENDED: {
- uint16_t reg = DecodeUnsignedLeb128(&stream);
- if (reg >= code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
- << code_item->registers_size_ << ") in " << GetLocation();
- return false;
- }
-
- uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
- uint16_t descriptor_idx = DecodeUnsignedLeb128P1(&stream);
- uint32_t signature_idx = dex::kDexNoIndex;
- if (opcode == DBG_START_LOCAL_EXTENDED) {
- signature_idx = DecodeUnsignedLeb128P1(&stream);
- }
-
- // Emit what was previously there, if anything
- if (local_in_reg[reg].is_live_) {
- local_in_reg[reg].end_address_ = address;
- local_cb(context, local_in_reg[reg]);
- }
-
- local_in_reg[reg].name_ = StringDataByIdx(dex::StringIndex(name_idx));
- local_in_reg[reg].descriptor_ =
- StringByTypeIdx(dex::TypeIndex(dchecked_integral_cast<uint16_t>(descriptor_idx)));;
- local_in_reg[reg].signature_ = StringDataByIdx(dex::StringIndex(signature_idx));
- local_in_reg[reg].start_address_ = address;
- local_in_reg[reg].reg_ = reg;
- local_in_reg[reg].is_live_ = true;
- break;
- }
- case DBG_END_LOCAL: {
- uint16_t reg = DecodeUnsignedLeb128(&stream);
- if (reg >= code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
- << code_item->registers_size_ << ") in " << GetLocation();
- return false;
- }
- // If the register is live, close it properly. Otherwise, closing an already
- // closed register is sloppy, but harmless if no further action is taken.
- if (local_in_reg[reg].is_live_) {
- local_in_reg[reg].end_address_ = address;
- local_cb(context, local_in_reg[reg]);
- local_in_reg[reg].is_live_ = false;
- }
- break;
- }
- case DBG_RESTART_LOCAL: {
- uint16_t reg = DecodeUnsignedLeb128(&stream);
- if (reg >= code_item->registers_size_) {
- LOG(ERROR) << "invalid stream - reg >= reg size (" << reg << " >= "
- << code_item->registers_size_ << ") in " << GetLocation();
- return false;
- }
- // If the register is live, the "restart" is superfluous,
- // and we don't want to mess with the existing start address.
- if (!local_in_reg[reg].is_live_) {
- local_in_reg[reg].start_address_ = address;
- local_in_reg[reg].is_live_ = true;
- }
- break;
- }
- case DBG_SET_PROLOGUE_END:
- case DBG_SET_EPILOGUE_BEGIN:
- break;
- case DBG_SET_FILE:
- DecodeUnsignedLeb128P1(&stream); // name.
- break;
- default:
- address += (opcode - DBG_FIRST_SPECIAL) / DBG_LINE_RANGE;
- break;
- }
- }
-}
-
-bool DexFile::DecodeDebugPositionInfo(const CodeItem* code_item, DexDebugNewPositionCb position_cb,
- void* context) const {
- DCHECK(position_cb != nullptr);
- if (code_item == nullptr) {
- return false;
- }
- const uint8_t* stream = GetDebugInfoStream(code_item);
- if (stream == nullptr) {
- return false;
- }
-
- PositionInfo entry = PositionInfo();
- entry.line_ = DecodeUnsignedLeb128(&stream);
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- DecodeUnsignedLeb128P1(&stream); // Parameter name.
- }
-
- for (;;) {
- uint8_t opcode = *stream++;
- switch (opcode) {
- case DBG_END_SEQUENCE:
- return true; // end of stream.
- case DBG_ADVANCE_PC:
- entry.address_ += DecodeUnsignedLeb128(&stream);
- break;
- case DBG_ADVANCE_LINE:
- entry.line_ += DecodeSignedLeb128(&stream);
- break;
- case DBG_START_LOCAL:
- DecodeUnsignedLeb128(&stream); // reg.
- DecodeUnsignedLeb128P1(&stream); // name.
- DecodeUnsignedLeb128P1(&stream); // descriptor.
- break;
- case DBG_START_LOCAL_EXTENDED:
- DecodeUnsignedLeb128(&stream); // reg.
- DecodeUnsignedLeb128P1(&stream); // name.
- DecodeUnsignedLeb128P1(&stream); // descriptor.
- DecodeUnsignedLeb128P1(&stream); // signature.
- break;
- case DBG_END_LOCAL:
- case DBG_RESTART_LOCAL:
- DecodeUnsignedLeb128(&stream); // reg.
- break;
- case DBG_SET_PROLOGUE_END:
- entry.prologue_end_ = true;
- break;
- case DBG_SET_EPILOGUE_BEGIN:
- entry.epilogue_begin_ = true;
- break;
- case DBG_SET_FILE: {
- uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
- entry.source_file_ = StringDataByIdx(dex::StringIndex(name_idx));
- break;
- }
- default: {
- int adjopcode = opcode - DBG_FIRST_SPECIAL;
- entry.address_ += adjopcode / DBG_LINE_RANGE;
- entry.line_ += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
- if (position_cb(context, entry)) {
- return true; // early exit.
- }
- entry.prologue_end_ = false;
- entry.epilogue_begin_ = false;
- break;
- }
- }
- }
-}
-
bool DexFile::LineNumForPcCb(void* raw_context, const PositionInfo& entry) {
LineNumFromPcContext* context = reinterpret_cast<LineNumFromPcContext*>(raw_context);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5759684c55..c895e0d1da 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -39,6 +39,21 @@ class Signature;
class StringPiece;
class ZipArchive;
+// Some instances of DexFile own the storage referred to by DexFile. Clients that manage
+// such storage do so by subclassing DexFileContainer.
+class DexFileContainer {
+ public:
+ DexFileContainer() { }
+ virtual ~DexFileContainer() { }
+ virtual int GetPermissions() = 0;
+ virtual bool IsReadOnly() = 0;
+ virtual bool EnableWrite() = 0;
+ virtual bool DisableWrite() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DexFileContainer);
+};
+
// Dex file is the API that exposes native dex files (ordinary dex files) and CompactDex.
// Originally, the dex file format used by ART was mostly the same as APKs. The only change was
// quickened opcodes and layout optimizations.
@@ -772,10 +787,6 @@ class DexFile {
bool epilogue_begin_ = false;
};
- // Callback for "new position table entry".
- // Returning true causes the decoder to stop early.
- typedef bool (*DexDebugNewPositionCb)(void* context, const PositionInfo& entry);
-
struct LocalInfo {
LocalInfo() = default;
@@ -857,14 +868,18 @@ class DexFile {
: reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
}
- const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
- DCHECK_LE(index, set_item->size_);
- uint32_t offset = set_item->entries_[index];
+ ALWAYS_INLINE const AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
+ DCHECK_LE(offset, Size());
return (offset == 0)
? nullptr
: reinterpret_cast<const AnnotationItem*>(begin_ + offset);
}
+ const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
+ DCHECK_LE(index, set_item->size_);
+ return GetAnnotationItemAtOffset(set_item->entries_[index]);
+ }
+
const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
uint32_t offset = anno_item->annotations_off_;
return (offset == 0)
@@ -899,11 +914,36 @@ class DexFile {
};
// Returns false if there is no debugging information or if it cannot be decoded.
- bool DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
- DexDebugNewLocalCb local_cb, void* context) const;
+ template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
+ static bool DecodeDebugLocalInfo(const uint8_t* stream,
+ const std::string& location,
+ const char* declaring_class_descriptor,
+ const std::vector<const char*>& arg_descriptors,
+ const std::string& method_name,
+ bool is_static,
+ uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t insns_size_in_code_units,
+ IndexToStringData index_to_string_data,
+ TypeIndexToStringData type_index_to_string_data,
+ NewLocalCallback new_local,
+ void* context);
+ template<typename NewLocalCallback>
+ bool DecodeDebugLocalInfo(const CodeItem* code_item,
+ bool is_static,
+ uint32_t method_idx,
+ NewLocalCallback new_local,
+ void* context) const;
// Returns false if there is no debugging information or if it cannot be decoded.
- bool DecodeDebugPositionInfo(const CodeItem* code_item, DexDebugNewPositionCb position_cb,
+ template<typename DexDebugNewPosition, typename IndexToStringData>
+ static bool DecodeDebugPositionInfo(const uint8_t* stream,
+ IndexToStringData index_to_string_data,
+ DexDebugNewPosition position_functor,
+ void* context);
+ template<typename DexDebugNewPosition>
+ bool DecodeDebugPositionInfo(const CodeItem* code_item,
+ DexDebugNewPosition position_functor,
void* context) const;
const char* GetSourceFile(const ClassDef& class_def) const {
@@ -955,12 +995,21 @@ class DexFile {
// Returns a human-readable form of the type at an index.
std::string PrettyType(dex::TypeIndex type_idx) const;
+ // Helper functions.
+ virtual bool IsCompactDexFile() const {
+ return false;
+ }
+ virtual bool IsStandardDexFile() const {
+ return false;
+ }
+
protected:
DexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file);
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container);
// Top-level initializer that calls other Init methods.
bool Init(std::string* error_msg);
@@ -985,9 +1034,6 @@ class DexFile {
const uint32_t location_checksum_;
- // Manages the underlying memory allocation.
- std::unique_ptr<MemMap> mem_map_;
-
// Points to the header section.
const Header* const header_;
@@ -1026,6 +1072,9 @@ class DexFile {
// null.
mutable const OatDexFile* oat_dex_file_;
+ // Manages the underlying memory allocation.
+ std::unique_ptr<DexFileContainer> container_;
+
friend class DexFileLoader;
friend class DexFileVerifierTest;
friend class OatWriter;
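
As an illustration only, a minimal DexFileContainer subclass for backing storage that is mapped read-only and never made writable; the class name is hypothetical, and the patch's own MemMapContainer in dex_file_loader.cc (further below) is the implementation actually used by DexFileLoader.

    #include <sys/mman.h>  // For PROT_READ.
    #include "dex_file.h"  // DexFileContainer; pulls in ART's OVERRIDE macro.

    namespace art {

    // Hypothetical container: the storage is always read-only, so EnableWrite() must fail
    // and DisableWrite() is trivially satisfied.
    class ReadOnlyDexFileContainer : public DexFileContainer {
     public:
      int GetPermissions() OVERRIDE { return PROT_READ; }
      bool IsReadOnly() OVERRIDE { return true; }
      bool EnableWrite() OVERRIDE { return false; }  // Backing storage cannot be remapped.
      bool DisableWrite() OVERRIDE { return true; }  // Already read-only.
    };

    }  // namespace art
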
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index fe33bded2b..845202ff72 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -134,8 +134,13 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = field->GetDexFile();
ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
+ const DexFile::ClassDef* class_def = klass->GetClassDef();
+ if (class_def == nullptr) {
+ DCHECK(klass->IsProxyClass());
+ return nullptr;
+ }
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file->GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -258,6 +263,9 @@ const uint8_t* SearchEncodedAnnotation(const DexFile& dex_file,
const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ }
const DexFile* dex_file = method->GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
dex_file->GetAnnotationsDirectory(method->GetClassDef());
@@ -305,8 +313,13 @@ const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod*
const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
+ const DexFile::ClassDef* class_def = klass.GetClassDef();
+ if (class_def == nullptr) {
+ DCHECK(klass.GetRealClass()->IsProxyClass());
+ return nullptr;
+ }
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file.GetAnnotationsDirectory(*klass.GetClassDef());
+ dex_file.GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
diff --git a/runtime/dex_file_loader.cc b/runtime/dex_file_loader.cc
index 8cab1a501a..06e3397754 100644
--- a/runtime/dex_file_loader.cc
+++ b/runtime/dex_file_loader.cc
@@ -25,6 +25,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
+#include "cdex/compact_dex_file.h"
#include "dex_file.h"
#include "dex_file_verifier.h"
#include "standard_dex_file.h"
@@ -32,17 +33,72 @@
namespace art {
+namespace {
+
+class MemMapContainer : public DexFileContainer {
+ public:
+ explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
+ virtual ~MemMapContainer() OVERRIDE { }
+
+ int GetPermissions() OVERRIDE {
+ if (mem_map_.get() == nullptr) {
+ return 0;
+ } else {
+ return mem_map_->GetProtect();
+ }
+ }
+
+ bool IsReadOnly() OVERRIDE {
+ return GetPermissions() == PROT_READ;
+ }
+
+ bool EnableWrite() OVERRIDE {
+ CHECK(IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+
+ bool DisableWrite() OVERRIDE {
+ CHECK(!IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ);
+ }
+ }
+
+ private:
+ std::unique_ptr<MemMap> mem_map_;
+ DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
+};
+
+} // namespace
+
using android::base::StringPrintf;
static constexpr OatDexFile* kNoOatDexFile = nullptr;
-bool DexFileLoader::IsValidMagic(uint32_t magic) {
- return IsValidMagic(reinterpret_cast<uint8_t*>(&magic));
+bool DexFileLoader::IsMagicValid(uint32_t magic) {
+ return IsMagicValid(reinterpret_cast<uint8_t*>(&magic));
}
-bool DexFileLoader::IsValidMagic(const uint8_t* magic) {
- return StandardDexFile::IsMagicValid(magic);
+bool DexFileLoader::IsMagicValid(const uint8_t* magic) {
+ return StandardDexFile::IsMagicValid(magic) ||
+ CompactDexFile::IsMagicValid(magic);
+}
+
+bool DexFileLoader::IsVersionAndMagicValid(const uint8_t* magic) {
+ if (StandardDexFile::IsMagicValid(magic)) {
+ return StandardDexFile::IsVersionValid(magic);
+ }
+ if (CompactDexFile::IsMagicValid(magic)) {
+ return CompactDexFile::IsVersionValid(magic);
+ }
+ return false;
}
bool DexFileLoader::GetMultiDexChecksums(const char* filename,
@@ -81,7 +137,7 @@ bool DexFileLoader::GetMultiDexChecksums(const char* filename,
} while (zip_entry.get() != nullptr);
return true;
}
- if (IsValidMagic(magic)) {
+ if (IsMagicValid(magic)) {
std::unique_ptr<const DexFile> dex_file(
OpenFile(fd.Release(), filename, false, false, error_msg));
if (dex_file == nullptr) {
@@ -139,7 +195,9 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
oat_dex_file,
verify,
verify_checksum,
- error_msg);
+ error_msg,
+ /*container*/ nullptr,
+ /*verify_result*/ nullptr);
}
std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
@@ -165,15 +223,15 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
kNoOatDexFile,
verify,
verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
return dex_file;
}
bool DexFileLoader::Open(const char* filename,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
@@ -186,12 +244,12 @@ bool DexFileLoader::Open(const char* filename,
return false;
}
if (IsZipMagic(magic)) {
- return OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
+ return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
}
- if (IsValidMagic(magic)) {
+ if (IsMagicValid(magic)) {
std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
location,
- /* verify */ true,
+ verify,
verify_checksum,
error_msg));
if (dex_file.get() != nullptr) {
@@ -207,14 +265,16 @@ bool DexFileLoader::Open(const char* filename,
std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg);
+ return OpenFile(fd, location, verify, verify_checksum, error_msg);
}
bool DexFileLoader::OpenZip(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
@@ -225,7 +285,8 @@ bool DexFileLoader::OpenZip(int fd,
DCHECK(!error_msg->empty());
return false;
}
- return OpenAllDexFilesFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files);
+ return OpenAllDexFilesFromZip(
+ *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
}
std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
@@ -280,10 +341,9 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
kNoOatDexFile,
verify,
verify_checksum,
- error_msg);
- if (dex_file != nullptr) {
- dex_file->mem_map_ = std::move(map);
- }
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
return dex_file;
}
@@ -292,6 +352,7 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
const ZipArchive& zip_archive,
const char* entry_name,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
ZipOpenErrorCode* error_code) {
@@ -345,9 +406,10 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
location,
zip_entry->GetCrc32(),
kNoOatDexFile,
- /* verify */ true,
+ verify,
verify_checksum,
error_msg,
+ new MemMapContainer(std::move(map)),
&verify_result);
if (dex_file == nullptr) {
if (verify_result == VerifyResult::kVerifyNotAttempted) {
@@ -357,7 +419,6 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
}
return nullptr;
}
- dex_file->mem_map_ = std::move(map);
if (!dex_file->DisableWrite()) {
*error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
*error_code = ZipOpenErrorCode::kMakeReadOnlyError;
@@ -379,16 +440,18 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace("Dex file open from Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
kClassesDex,
location,
+ verify,
verify_checksum,
error_msg,
&error_code));
@@ -409,6 +472,7 @@ bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
name.c_str(),
fake_location,
+ verify,
verify_checksum,
error_msg,
&error_code));
@@ -445,15 +509,18 @@ std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
bool verify,
bool verify_checksum,
std::string* error_msg,
+ DexFileContainer* container,
VerifyResult* verify_result) {
if (verify_result != nullptr) {
*verify_result = VerifyResult::kVerifyNotAttempted;
}
std::unique_ptr<DexFile> dex_file;
if (StandardDexFile::IsMagicValid(base)) {
- dex_file.reset(new StandardDexFile(base, size, location, location_checksum, oat_dex_file));
- } else {
- return nullptr;
+ dex_file.reset(
+ new StandardDexFile(base, size, location, location_checksum, oat_dex_file, container));
+ } else if (CompactDexFile::IsMagicValid(base)) {
+ dex_file.reset(
+ new CompactDexFile(base, size, location, location_checksum, oat_dex_file, container));
}
if (dex_file == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
diff --git a/runtime/dex_file_loader.h b/runtime/dex_file_loader.h
index 61b5c71726..97c886ac7d 100644
--- a/runtime/dex_file_loader.h
+++ b/runtime/dex_file_loader.h
@@ -25,6 +25,7 @@
namespace art {
class DexFile;
+class DexFileContainer;
class MemMap;
class OatDexFile;
class ZipArchive;
@@ -39,8 +40,11 @@ class DexFileLoader {
static constexpr char kMultiDexSeparator = '!';
// Return true if the magic is valid for dex or cdex.
- static bool IsValidMagic(uint32_t magic);
- static bool IsValidMagic(const uint8_t* magic);
+ static bool IsMagicValid(uint32_t magic);
+ static bool IsMagicValid(const uint8_t* magic);
+
+ // Return true if the corresponding version and magic is valid.
+ static bool IsVersionAndMagicValid(const uint8_t* magic);
// Returns the checksums of a file for comparison with GetLocationChecksum().
// For .dex files, this is the single header checksum.
@@ -76,6 +80,7 @@ class DexFileLoader {
// Opens all .dex files found in the file, guessing the container format based on file extension.
static bool Open(const char* filename,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -83,12 +88,14 @@ class DexFileLoader {
// Open a single dex file from an fd. This function closes the fd.
static std::unique_ptr<const DexFile> OpenDex(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg);
// Opens dex files from within a .jar, .zip, or .apk file
static bool OpenZip(int fd,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -155,6 +162,7 @@ class DexFileLoader {
// Open all classesXXX.dex files from a zip archive.
static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -164,6 +172,7 @@ class DexFileLoader {
static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
const char* entry_name,
const std::string& location,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
ZipOpenErrorCode* error_code);
@@ -182,17 +191,8 @@ class DexFileLoader {
bool verify,
bool verify_checksum,
std::string* error_msg,
- VerifyResult* verify_result = nullptr);
-
-
- // Opens a .dex file at the given address, optionally backed by a MemMap
- static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- const OatDexFile* oat_dex_file,
- std::string* error_msg);
+ DexFileContainer* container,
+ VerifyResult* verify_result);
};
} // namespace art
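
For reference, a hedged caller-side sketch of the widened DexFileLoader::Open() signature; the test call sites updated below pass the new verify flag explicitly in exactly this way. The wrapper name and the file path are hypothetical.

    #include <memory>
    #include <string>
    #include <vector>
    #include "dex_file.h"
    #include "dex_file_loader.h"

    namespace art {

    // Hypothetical wrapper: opens every .dex file in an APK with both verification
    // and checksum verification enabled.
    bool OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
                         std::string* error_msg) {
      const char* path = "/data/app/example.apk";  // Hypothetical location.
      return DexFileLoader::Open(path,
                                 path,
                                 /* verify */ true,
                                 /* verify_checksum */ true,
                                 error_msg,
                                 dex_files);
    }

    }  // namespace art
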
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index b3011379c6..90bc4b8f94 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -236,7 +236,8 @@ static bool OpenDexFilesBase64(const char* base64,
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(location, location, kVerifyChecksum, error_msg, &tmp);
+ bool success = DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, error_msg, &tmp);
if (success) {
for (std::unique_ptr<const DexFile>& dex_file : tmp) {
EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
@@ -366,7 +367,8 @@ TEST_F(DexFileTest, Version40Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, Version41Rejected) {
@@ -378,7 +380,8 @@ TEST_F(DexFileTest, Version41Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, ZeroLengthDexRejected) {
@@ -390,7 +393,8 @@ TEST_F(DexFileTest, ZeroLengthDexRejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+ ASSERT_FALSE(DexFileLoader::Open(
+ location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
TEST_F(DexFileTest, GetLocationChecksum) {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 8fdd4706e4..50f56c799a 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -721,14 +721,19 @@ bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx,
return true;
}
-bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
+bool DexFileVerifier::CheckPadding(size_t offset,
+ uint32_t aligned_offset,
+ DexFile::MapItemType type) {
if (offset < aligned_offset) {
if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(uint8_t), "section")) {
return false;
}
while (offset < aligned_offset) {
if (UNLIKELY(*ptr_ != '\0')) {
- ErrorStringPrintf("Non-zero padding %x before section start at %zx", *ptr_, offset);
+ ErrorStringPrintf("Non-zero padding %x before section of type %zu at offset 0x%zx",
+ *ptr_,
+ static_cast<size_t>(type),
+ offset);
return false;
}
ptr_++;
@@ -1615,7 +1620,7 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t section_c
size_t aligned_offset = (offset + alignment_mask) & ~alignment_mask;
// Check the padding between items.
- if (!CheckPadding(offset, aligned_offset)) {
+ if (!CheckPadding(offset, aligned_offset, type)) {
return false;
}
@@ -1837,7 +1842,10 @@ bool DexFileVerifier::CheckIntraDataSection(size_t offset,
size_t next_offset = ptr_ - begin_;
if (next_offset > data_end) {
- ErrorStringPrintf("Out-of-bounds end of data subsection: %zx", next_offset);
+ ErrorStringPrintf("Out-of-bounds end of data subsection: %zu data_off=%u data_size=%u",
+ next_offset,
+ header_->data_off_,
+ header_->data_size_);
return false;
}
@@ -1859,7 +1867,7 @@ bool DexFileVerifier::CheckIntraSection() {
DexFile::MapItemType type = static_cast<DexFile::MapItemType>(item->type_);
// Check for padding and overlap between items.
- if (!CheckPadding(offset, section_offset)) {
+ if (!CheckPadding(offset, section_offset, type)) {
return false;
} else if (UNLIKELY(offset > section_offset)) {
ErrorStringPrintf("Section overlap or out-of-order map: %zx, %x", offset, section_offset);
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 74f82254b3..23089fa215 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -97,7 +97,7 @@ class DexFileVerifier {
const DexFile::ClassDef** class_def);
bool CheckStaticFieldTypes(const DexFile::ClassDef* class_def);
- bool CheckPadding(size_t offset, uint32_t aligned_offset);
+ bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
bool CheckEncodedValue();
bool CheckEncodedArray();
bool CheckEncodedAnnotation();
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 9f3505d3be..ee577e7d9a 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -57,7 +57,7 @@ static void FixUpChecksum(uint8_t* dex_file) {
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) {
- return new StandardDexFile(dex_bytes, length, "tmp", 0, nullptr);
+ return new StandardDexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
}
void VerifyModification(const char* dex_file_base64_content,
@@ -114,7 +114,8 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(location, location, true, error_msg, &tmp);
+ bool success = DexFileLoader::Open(
+ location, location, /* verify */ true, /* verify_checksum */ true, error_msg, &tmp);
CHECK(success) << *error_msg;
EXPECT_EQ(1U, tmp.size());
std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index ea7a83c75e..cf5cc111b7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2603,7 +2603,7 @@ extern "C" uintptr_t artInvokePolymorphic(
gc_visitor.VisitArguments();
// Wrap raw_method_handle in a Handle for safety.
- StackHandleScope<5> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::MethodHandle> method_handle(
hs.NewHandle(ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(raw_method_handle))));
raw_method_handle = nullptr;
@@ -2622,11 +2622,9 @@ extern "C" uintptr_t artInvokePolymorphic(
return static_cast<uintptr_t>('V');
}
- Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass()));
- Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType(
- *dex_file, proto_idx,
- hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
- hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
+ Handle<mirror::MethodType> method_type(
+ hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
+
// This implies we couldn't resolve one or more types in this method handle.
if (UNLIKELY(method_type.IsNull())) {
CHECK(self->IsExceptionPending());
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 52b355dedd..4d4d8ffb58 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2153,14 +2153,18 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by
mirror::Class* int_array_class = down_cast<mirror::Class*>(
Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
CHECK(int_array_class != nullptr);
- AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
+ if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
+ }
size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
CHECK_EQ(component_size, sizeof(int32_t));
size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
if (data_offset > byte_size) {
// An int array is too big. Use java.lang.Object.
CHECK(java_lang_Object_ != nullptr);
- AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
+ if (ReadBarrier::kEnableToSpaceInvariantChecks) {
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
+ }
CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
dummy_obj->SetClass(java_lang_Object_);
CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4004af2875..4f5458212c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -77,7 +77,7 @@
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "os.h"
#include "reflection.h"
@@ -93,6 +93,9 @@ namespace gc {
static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
+
+DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
+
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
@@ -132,10 +135,6 @@ static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
-// Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
-// config.
-static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
-
static const char* kRegionSpaceName = "main space (region space)";
// If true, we log all GCs in the both the foreground and background. Used for debugging.
@@ -255,8 +254,7 @@ Heap::Heap(size_t initial_size,
min_free_(min_free),
max_free_(max_free),
target_utilization_(target_utilization),
- foreground_heap_growth_multiplier_(
- foreground_heap_growth_multiplier + kExtraHeapGrowthMultiplier),
+ foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
total_wait_time_(0),
verify_object_mode_(kVerifyObjectModeDisabled),
disable_moving_gc_count_(0),
@@ -894,7 +892,9 @@ void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_p
// the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
// transition the collector.
RequestCollectorTransition(background_collector_type_,
- kIsDebugBuild ? 0 : kCollectorTransitionWait);
+ kStressCollectorTransition
+ ? 0
+ : kCollectorTransitionWait);
}
}
}
@@ -3428,7 +3428,7 @@ collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_ty
double Heap::HeapGrowthMultiplier() const {
// If we don't care about pause times we are background, so return 1.0.
- if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
+ if (!CareAboutPauseTimes()) {
return 1.0;
}
return foreground_heap_growth_multiplier_;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d673b4ac29..7b4fab607f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
@@ -155,6 +156,9 @@ class Heap {
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
// How long we wait after a transition request to perform a collector transition (nanoseconds).
static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
+ // Whether the transition-wait applies or not. Zero wait will stress the
+ // transition code and collector, but increases jank probability.
+ DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 42b31ab140..d58d09c794 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -22,7 +22,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2c82cb1acd..49f202182d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -137,11 +137,12 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
- return Dbg::IsDebuggerActive() &&
- Runtime::Current()->IsJavaDebuggable() &&
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
- !method->IsProxyMethod();
+ !method->IsProxyMethod() &&
+ Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
}
void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 68a75b0196..038405eebc 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -18,6 +18,7 @@
#include <limits>
+#include "common_dex_operations.h"
#include "common_throws.h"
#include "dex_file_types.h"
#include "interpreter_common.h"
@@ -28,7 +29,7 @@
#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "mterp/mterp.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
@@ -287,11 +288,12 @@ static inline JValue Execute(
}
}
- shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ ArtMethod* method = shadow_frame.GetMethod();
+
+ DCheckStaticState(self, method);
// Lock counting is a special version of accessibility checks, and for simplicity and
// reduction of template parameters, we gate it behind access-checks mode.
- ArtMethod* method = shadow_frame.GetMethod();
DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());
bool transaction_active = Runtime::Current()->IsActiveTransaction();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5938113fe1..9fb9fe7274 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -610,7 +610,7 @@ bool DoInvokePolymorphic(Thread* self,
// The invoke_method_idx here is the name of the signature polymorphic method that
// was symbolically invoked in bytecode (say MethodHandle.invoke or MethodHandle.invokeExact)
// and not the method that we'll dispatch to in the end.
- StackHandleScope<5> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
ObjPtr<mirror::MethodHandle>::DownCast(
MakeObjPtr(shadow_frame.GetVRegReference(vRegC)))));
@@ -629,11 +629,8 @@ bool DoInvokePolymorphic(Thread* self,
// with the callsite. This information is stored in the dex cache so it's
// guaranteed to be fast after the first resolution.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::Class> caller_class(hs.NewHandle(shadow_frame.GetMethod()->GetDeclaringClass()));
- Handle<mirror::MethodType> callsite_type(hs.NewHandle(class_linker->ResolveMethodType(
- caller_class->GetDexFile(), callsite_proto_id,
- hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
- hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
+ Handle<mirror::MethodType> callsite_type(hs.NewHandle(
+ class_linker->ResolveMethodType(self, callsite_proto_id, shadow_frame.GetMethod())));
// This implies we couldn't resolve one or more types in this method handle.
if (UNLIKELY(callsite_type == nullptr)) {
@@ -695,7 +692,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
uint32_t method_handle_idx = static_cast<uint32_t>(it.GetJavaValue().i);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Handle<mirror::MethodHandle>
- bootstrap(hs.NewHandle(class_linker->ResolveMethodHandle(method_handle_idx, referrer)));
+ bootstrap(hs.NewHandle(class_linker->ResolveMethodHandle(self, method_handle_idx, referrer)));
if (bootstrap.IsNull()) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -740,7 +737,8 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// The third parameter is the method type associated with the name.
uint32_t method_type_idx = static_cast<uint32_t>(it.GetJavaValue().i);
Handle<mirror::MethodType>
- method_type(hs.NewHandle(class_linker->ResolveMethodType(*dex_file,
+ method_type(hs.NewHandle(class_linker->ResolveMethodType(self,
+ *dex_file,
method_type_idx,
dex_cache,
class_loader)));
@@ -778,7 +776,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
case EncodedArrayValueIterator::ValueType::kMethodType: {
uint32_t idx = static_cast<uint32_t>(jvalue.i);
ObjPtr<mirror::MethodType> ref =
- class_linker->ResolveMethodType(*dex_file, idx, dex_cache, class_loader);
+ class_linker->ResolveMethodType(self, *dex_file, idx, dex_cache, class_loader);
if (ref.IsNull()) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -790,7 +788,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
case EncodedArrayValueIterator::ValueType::kMethodHandle: {
uint32_t idx = static_cast<uint32_t>(jvalue.i);
ObjPtr<mirror::MethodHandle> ref =
- class_linker->ResolveMethodHandle(idx, referrer);
+ class_linker->ResolveMethodHandle(self, idx, referrer);
if (ref.IsNull()) {
DCHECK(self->IsExceptionPending());
return nullptr;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 1c796195c0..e7f67ebb0d 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -206,11 +206,12 @@ static inline bool DoInvoke(Thread* self,
}
}
-static inline mirror::MethodHandle* ResolveMethodHandle(uint32_t method_handle_index,
+static inline mirror::MethodHandle* ResolveMethodHandle(Thread* self,
+ uint32_t method_handle_index,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- return class_linker->ResolveMethodHandle(method_handle_index, referrer);
+ return class_linker->ResolveMethodHandle(self, method_handle_index, referrer);
}
static inline mirror::MethodType* ResolveMethodType(Thread* self,
@@ -218,11 +219,7 @@ static inline mirror::MethodType* ResolveMethodType(Thread* self,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const DexFile* dex_file = referrer->GetDexFile();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
- return class_linker->ResolveMethodType(*dex_file, method_type_index, dex_cache, class_loader);
+ return class_linker->ResolveMethodType(self, method_type_index, referrer);
}
// Performs a signature polymorphic invoke (invoke-polymorphic/invoke-polymorphic-range).
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 74d7901ffe..094f08664e 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -534,8 +534,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::CONST_METHOD_HANDLE: {
PREAMBLE();
- ObjPtr<mirror::MethodHandle> mh =
- Runtime::Current()->GetClassLinker()->ResolveMethodHandle(inst->VRegB_21c(), method);
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::MethodHandle> mh = cl->ResolveMethodHandle(self, inst->VRegB_21c(), method);
if (UNLIKELY(mh == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -546,8 +546,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::CONST_METHOD_TYPE: {
PREAMBLE();
- ObjPtr<mirror::MethodType> mt =
- Runtime::Current()->GetClassLinker()->ResolveMethodType(inst->VRegB_21c(), method);
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self, inst->VRegB_21c(), method);
if (UNLIKELY(mt == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
diff --git a/runtime/interpreter/mterp/Makefile_mterp b/runtime/interpreter/mterp/Makefile_mterp
index f0c30ad56c..ac8da69742 100644
--- a/runtime/interpreter/mterp/Makefile_mterp
+++ b/runtime/interpreter/mterp/Makefile_mterp
@@ -25,7 +25,7 @@ SHELL := /bin/sh
# To generate sources:
# for arch in arm arm64 x86 x86_64 mips mips64
# do
-# TARGET_ARCH_EXT=$arch make -f Makefile-mterp
+# TARGET_ARCH_EXT=$arch make -f Makefile_mterp
# done
#
diff --git a/runtime/interpreter/mterp/arm/const.S b/runtime/interpreter/mterp/arm/const.S
new file mode 100644
index 0000000000..f6f8157a0b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/const.S
@@ -0,0 +1,18 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl $helper @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
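This shared template is expanded once per const opcode, with $helper substituted by MterpConstClass, MterpConstString, MterpConstMethodHandle or MterpConstMethodType through the %include lines in the per-opcode files below. Every helper follows the same four-argument convention and reports failure with a non-zero return, which the stub turns into a branch to MterpPossibleException. The following is a minimal C++ sketch of that contract, modeled on the MterpConstMethodHandle and MterpConstMethodType definitions added to mterp.cc later in this change; MterpConstExample and ResolveExampleConstant are hypothetical names used only for illustration.

// Illustrative only: the C-side contract that every $helper substituted into
// const.S is expected to follow, modeled on the MterpConstMethodHandle and
// MterpConstMethodType definitions added to mterp.cc in this change.
// MterpConstExample and ResolveExampleConstant are hypothetical names.
extern "C" size_t MterpConstExample(uint32_t index,
                                    uint32_t tgt_vreg,
                                    ShadowFrame* shadow_frame,
                                    Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Object> ref =
      ResolveExampleConstant(self, index, shadow_frame->GetMethod());  // hypothetical resolver
  if (UNLIKELY(ref == nullptr)) {
    return true;   // non-zero: the stub branches to MterpPossibleException
  }
  shadow_frame->SetVRegReference(tgt_vreg, ref.Ptr());
  return false;    // zero: the stub advances rPC by two code units and dispatches
}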
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
index ce14b548d0..5781414f27 100644
--- a/runtime/interpreter/mterp/arm/entry.S
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -19,8 +19,6 @@
.text
.align 2
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
/*
* On entry:
diff --git a/runtime/interpreter/mterp/arm/invoke_polymorphic.S b/runtime/interpreter/mterp/arm/invoke_polymorphic.S
new file mode 100644
index 0000000000..f569d61c0b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/invoke_polymorphic.S
@@ -0,0 +1,21 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl $helper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/op_const_class.S b/runtime/interpreter/mterp/arm/op_const_class.S
index 0b111f4d06..ff5c98c743 100644
--- a/runtime/interpreter/mterp/arm/op_const_class.S
+++ b/runtime/interpreter/mterp/arm/op_const_class.S
@@ -1,13 +1 @@
- /* const/class vAA, Class@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
+%include "arm/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm/op_const_method_handle.S b/runtime/interpreter/mterp/arm/op_const_method_handle.S
new file mode 100644
index 0000000000..71f05501e7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "arm/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm/op_const_method_type.S b/runtime/interpreter/mterp/arm/op_const_method_type.S
new file mode 100644
index 0000000000..2cccdafef4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_method_type.S
@@ -0,0 +1 @@
+%include "arm/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm/op_const_string.S b/runtime/interpreter/mterp/arm/op_const_string.S
index 4b8302a9ec..75ec34ffb4 100644
--- a/runtime/interpreter/mterp/arm/op_const_string.S
+++ b/runtime/interpreter/mterp/arm/op_const_string.S
@@ -1,13 +1 @@
- /* const/string vAA, String@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
+%include "arm/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom.S b/runtime/interpreter/mterp/arm/op_invoke_custom.S
new file mode 100644
index 0000000000..2af875c9df
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_custom.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeCustom" }
+ /*
+ * Handle an invoke-custom invocation.
+ *
+ * for: invoke-custom, invoke-custom/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
new file mode 100644
index 0000000000..32575c4d45
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..816a7ae217
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..2541c270e2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm/op_unused_fe.S b/runtime/interpreter/mterp/arm/op_unused_fe.S
deleted file mode 100644
index 10948dc06c..0000000000
--- a/runtime/interpreter/mterp/arm/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_ff.S b/runtime/interpreter/mterp/arm/op_unused_ff.S
deleted file mode 100644
index 10948dc06c..0000000000
--- a/runtime/interpreter/mterp/arm/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/close_cfi.S b/runtime/interpreter/mterp/arm64/close_cfi.S
new file mode 100644
index 0000000000..7ba0486079
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/close_cfi.S
@@ -0,0 +1,4 @@
+// Close out the cfi info. We're treating mterp as a single function.
+
+END ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/arm64/const.S b/runtime/interpreter/mterp/arm64/const.S
new file mode 100644
index 0000000000..6f82bbf0ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/const.S
@@ -0,0 +1,17 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl $helper // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
index 73c5a88e5f..7306e4e0f1 100644
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ b/runtime/interpreter/mterp/arm64/entry.S
@@ -25,12 +25,7 @@
* x3 JValue* result_register
*
*/
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-
-ExecuteMterpImpl:
- .cfi_startproc
+ENTRY ExecuteMterpImpl
SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
SAVE_TWO_REGS xIBASE, xREFS, 16
SAVE_TWO_REGS xSELF, xINST, 32
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
index 388fc8db74..fafa606a10 100644
--- a/runtime/interpreter/mterp/arm64/footer.S
+++ b/runtime/interpreter/mterp/arm64/footer.S
@@ -305,6 +305,3 @@ MterpProfileActive:
RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
ret
- .cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
index 7125d5a74d..cedfa49133 100644
--- a/runtime/interpreter/mterp/arm64/header.S
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -330,3 +330,20 @@ codes.
.cfi_restore \reg2
.cfi_adjust_cfa_offset -(\frame_adjustment)
.endm
+
+/*
+ * cfi support macros.
+ */
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
diff --git a/runtime/interpreter/mterp/arm64/invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
new file mode 100644
index 0000000000..7906f0ada0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl $helper
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm64/op_const_class.S b/runtime/interpreter/mterp/arm64/op_const_class.S
index 971cfa08bd..7228245b8f 100644
--- a/runtime/interpreter/mterp/arm64/op_const_class.S
+++ b/runtime/interpreter/mterp/arm64/op_const_class.S
@@ -1,12 +1 @@
- /* const/class vAA, Class//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
+%include "arm64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_handle.S b/runtime/interpreter/mterp/arm64/op_const_method_handle.S
new file mode 100644
index 0000000000..0df0fa6798
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "arm64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_type.S b/runtime/interpreter/mterp/arm64/op_const_method_type.S
new file mode 100644
index 0000000000..1adfe5ad65
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_method_type.S
@@ -0,0 +1 @@
+%include "arm64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_string.S b/runtime/interpreter/mterp/arm64/op_const_string.S
index 896f1e7104..8cf0d6dc35 100644
--- a/runtime/interpreter/mterp/arm64/op_const_string.S
+++ b/runtime/interpreter/mterp/arm64/op_const_string.S
@@ -1,12 +1 @@
- /* const/string vAA, String//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
+%include "arm64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom.S b/runtime/interpreter/mterp/arm64/op_invoke_custom.S
new file mode 100644
index 0000000000..3686584950
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_custom.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
new file mode 100644
index 0000000000..06de86a6a0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..aace98f1a2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..30c8c09cce
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fe.S b/runtime/interpreter/mterp/arm64/op_unused_fe.S
deleted file mode 100644
index 204eceff7e..0000000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_ff.S b/runtime/interpreter/mterp/arm64/op_unused_ff.S
deleted file mode 100644
index 204eceff7e..0000000000
--- a/runtime/interpreter/mterp/arm64/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
index b19426bfbb..a45efd999b 100644
--- a/runtime/interpreter/mterp/config_arm
+++ b/runtime/interpreter/mterp/config_arm
@@ -286,12 +286,12 @@ op-start arm
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 0987964090..590363f6e4 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -284,12 +284,12 @@ op-start arm64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm; we emit the footer before alternate
@@ -301,3 +301,6 @@ asm-alt-stub arm64/alt_stub.S
# emit alternate entry stubs
alt-ops
+
+# finish by closing .cfi info
+import arm64/close_cfi.S
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
index fe07385b5a..d6173daf2c 100644
--- a/runtime/interpreter/mterp/config_mips
+++ b/runtime/interpreter/mterp/config_mips
@@ -286,12 +286,12 @@ op-start mips
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
index d24cf4d8d0..a9bf362ec3 100644
--- a/runtime/interpreter/mterp/config_mips64
+++ b/runtime/interpreter/mterp/config_mips64
@@ -286,12 +286,12 @@ op-start mips64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index 076baf2907..2417851c11 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -290,12 +290,12 @@ op-start x86
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
index 44b671a36f..89fbf43444 100644
--- a/runtime/interpreter/mterp/config_x86_64
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -290,12 +290,12 @@ op-start x86_64
# op op_unused_f7 FALLBACK
# op op_unused_f8 FALLBACK
# op op_unused_f9 FALLBACK
- op op_invoke_polymorphic FALLBACK
- op op_invoke_polymorphic_range FALLBACK
- op op_invoke_custom FALLBACK
- op op_invoke_custom_range FALLBACK
- # op op_unused_fe FALLBACK
- # op op_unused_ff FALLBACK
+ # op op_invoke_polymorphic FALLBACK
+ # op op_invoke_polymorphic_range FALLBACK
+ # op op_invoke_custom FALLBACK
+ # op op_invoke_custom_range FALLBACK
+ # op op_const_method_handle FALLBACK
+ # op op_const_method_type FALLBACK
op-end
# common subroutines for asm
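Across all six architecture configs the same six opcodes stop being FALLBACK stubs and are routed to real mterp handlers. For reference, here is a small summary of the affected opcode values and the helpers they now reach, taken from the .L_op_* labels in the regenerated out/mterp_*.S files further down; the enum itself is purely illustrative and not part of the change.

// Illustrative only: opcodes whose FALLBACK stubs are removed by the config
// changes above, with the mterp helpers they now dispatch to (values taken
// from the regenerated out/mterp_*.S hunks later in this change).
enum class NewMterpOpcode : uint8_t {
  kInvokePolymorphic      = 0xfa,  // MterpInvokePolymorphic
  kInvokePolymorphicRange = 0xfb,  // MterpInvokePolymorphicRange
  kInvokeCustom           = 0xfc,  // MterpInvokeCustom
  kInvokeCustomRange      = 0xfd,  // MterpInvokeCustomRange
  kConstMethodHandle      = 0xfe,  // MterpConstMethodHandle
  kConstMethodType        = 0xff,  // MterpConstMethodType
};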
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index 5839b5fc97..1c9af30d0a 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -284,7 +284,6 @@ def loadAndEmitOpcodes():
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
asm_fp.write(" .text\n\n")
@@ -310,7 +309,6 @@ def loadAndEmitOpcodes():
asm_fp.write(label_prefix + "_op_nop: /* dummy */\n");
emitAlign()
- asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
@@ -319,12 +317,10 @@ def loadAndEmitOpcodes():
end_sister_label = global_name_format % "artMterpAsmSisterEnd"
emitSectionComment("Sister implementations", asm_fp)
asm_fp.write(" .global %s\n" % start_sister_label)
- asm_fp.write(" " + (function_type_format % start_sister_label) + "\n");
asm_fp.write(" .text\n")
asm_fp.write(" .balign 4\n")
asm_fp.write("%s:\n" % start_sister_label)
asm_fp.writelines(sister_list)
- asm_fp.write("\n " + (function_size_format % (start_sister_label, start_sister_label)) + "\n")
asm_fp.write(" .global %s\n" % end_sister_label)
asm_fp.write("%s:\n\n" % end_sister_label)
@@ -351,7 +347,6 @@ def loadAndEmitAltOpcodes():
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write(" .text\n\n")
asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
@@ -364,7 +359,6 @@ def loadAndEmitAltOpcodes():
loadAndEmitAltStub(source, i)
emitAlign()
- asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
diff --git a/runtime/interpreter/mterp/mips/const.S b/runtime/interpreter/mterp/mips/const.S
new file mode 100644
index 0000000000..5d8379dfb7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/const.S
@@ -0,0 +1,17 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL($helper) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/invoke_polymorphic.S b/runtime/interpreter/mterp/mips/invoke_polymorphic.S
new file mode 100644
index 0000000000..5c963f0314
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/invoke_polymorphic.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL($helper)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(4)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
index 9adea44577..5b3c96819a 100644
--- a/runtime/interpreter/mterp/mips/op_const_class.S
+++ b/runtime/interpreter/mterp/mips/op_const_class.S
@@ -1,12 +1 @@
- /* const/class vAA, class@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstClass)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+%include "mips/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips/op_const_method_handle.S b/runtime/interpreter/mterp/mips/op_const_method_handle.S
new file mode 100644
index 0000000000..4011e435c4
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "mips/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips/op_const_method_type.S b/runtime/interpreter/mterp/mips/op_const_method_type.S
new file mode 100644
index 0000000000..18a5e0f688
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_const_method_type.S
@@ -0,0 +1 @@
+%include "mips/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
index 006e114d0f..0bab6b4068 100644
--- a/runtime/interpreter/mterp/mips/op_const_string.S
+++ b/runtime/interpreter/mterp/mips/op_const_string.S
@@ -1,12 +1 @@
- /* const/string vAA, string@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+%include "mips/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom.S b/runtime/interpreter/mterp/mips/op_invoke_custom.S
new file mode 100644
index 0000000000..f9241c43c6
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_custom.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
new file mode 100644
index 0000000000..862a614404
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "mips/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..85e01e7221
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..ce6397837b
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips/op_unused_fe.S b/runtime/interpreter/mterp/mips/op_unused_fe.S
deleted file mode 100644
index 99ef3cf308..0000000000
--- a/runtime/interpreter/mterp/mips/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_ff.S b/runtime/interpreter/mterp/mips/op_unused_ff.S
deleted file mode 100644
index 99ef3cf308..0000000000
--- a/runtime/interpreter/mterp/mips/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/const.S b/runtime/interpreter/mterp/mips64/const.S
new file mode 100644
index 0000000000..2ec1173a7c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/const.S
@@ -0,0 +1,17 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal $helper # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
index b67df20a1f..264c411a55 100644
--- a/runtime/interpreter/mterp/mips64/header.S
+++ b/runtime/interpreter/mterp/mips64/header.S
@@ -14,17 +14,50 @@
* limitations under the License.
*/
-#include <machine/regdef.h>
+#define zero $$0 /* always zero */
+#define AT $$at /* assembler temp */
+#define v0 $$2 /* return value */
+#define v1 $$3
+#define a0 $$4 /* argument registers */
+#define a1 $$5
+#define a2 $$6
+#define a3 $$7
+#define a4 $$8 /* expanded register arguments */
+#define a5 $$9
+#define a6 $$10
+#define a7 $$11
+#define ta0 $$8 /* alias */
+#define ta1 $$9
+#define ta2 $$10
+#define ta3 $$11
+#define t0 $$12 /* temp registers (not saved across subroutine calls) */
+#define t1 $$13
+#define t2 $$14
+#define t3 $$15
-/* TODO: add the missing file and use its FP register definitions. */
-/* #include <machine/fpregdef.h> */
-/* FP register definitions */
-#define f0 $$f0
-#define f1 $$f1
-#define f2 $$f2
-#define f3 $$f3
-#define f12 $$f12
-#define f13 $$f13
+#define s0 $$16 /* saved across subroutine calls (callee saved) */
+#define s1 $$17
+#define s2 $$18
+#define s3 $$19
+#define s4 $$20
+#define s5 $$21
+#define s6 $$22
+#define s7 $$23
+#define t8 $$24 /* two more temp registers */
+#define t9 $$25
+#define k0 $$26 /* kernel temporary */
+#define k1 $$27
+#define gp $$28 /* global pointer */
+#define sp $$29 /* stack pointer */
+#define s8 $$30 /* one more callee saved */
+#define ra $$31 /* return address */
+
+#define f0 $$f0
+#define f1 $$f1
+#define f2 $$f2
+#define f3 $$f3
+#define f12 $$f12
+#define f13 $$f13
/*
* It looks like the GNU assembler currently does not support the blec and bgtc
diff --git a/runtime/interpreter/mterp/mips64/invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
new file mode 100644
index 0000000000..fa82083276
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
@@ -0,0 +1,20 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal $helper
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 4
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S
index adf79df38e..3f0c716d5e 100644
--- a/runtime/interpreter/mterp/mips64/op_const_class.S
+++ b/runtime/interpreter/mterp/mips64/op_const_class.S
@@ -1,13 +1 @@
- /* const/class vAA, Class//BBBB */
- .extern MterpConstClass
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_handle.S b/runtime/interpreter/mterp/mips64/op_const_method_handle.S
new file mode 100644
index 0000000000..43584d179c
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "mips64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_type.S b/runtime/interpreter/mterp/mips64/op_const_method_type.S
new file mode 100644
index 0000000000..553b28424a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_const_method_type.S
@@ -0,0 +1 @@
+%include "mips64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S
index 4684c11854..96cbb5a23a 100644
--- a/runtime/interpreter/mterp/mips64/op_const_string.S
+++ b/runtime/interpreter/mterp/mips64/op_const_string.S
@@ -1,13 +1 @@
- /* const/string vAA, String//BBBB */
- .extern MterpConstString
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
+%include "mips64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom.S b/runtime/interpreter/mterp/mips64/op_invoke_custom.S
new file mode 100644
index 0000000000..964253d8b7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_custom.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
new file mode 100644
index 0000000000..e6585e3646
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..d9324d73bf
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..8e0ecb570a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fe.S b/runtime/interpreter/mterp/mips64/op_unused_fe.S
deleted file mode 100644
index 29463d73fc..0000000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_ff.S b/runtime/interpreter/mterp/mips64/op_unused_ff.S
deleted file mode 100644
index 29463d73fc..0000000000
--- a/runtime/interpreter/mterp/mips64/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 2318125798..404c2609e8 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -211,6 +211,28 @@ extern "C" size_t MterpInvokeStatic(Thread* self,
self, *shadow_frame, inst, inst_data, result_register);
}
+extern "C" size_t MterpInvokeCustom(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeCustom<false /* is_range */>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" size_t MterpInvokePolymorphic(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokePolymorphic<false /* is_range */>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
extern "C" size_t MterpInvokeVirtualRange(Thread* self,
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
@@ -266,6 +288,27 @@ extern "C" size_t MterpInvokeStaticRange(Thread* self,
self, *shadow_frame, inst, inst_data, result_register);
}
+extern "C" size_t MterpInvokeCustomRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokePolymorphic<true /* is_range */>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
@@ -339,6 +382,32 @@ extern "C" size_t MterpConstClass(uint32_t index,
return false;
}
+extern "C" size_t MterpConstMethodHandle(uint32_t index,
+ uint32_t tgt_vreg,
+ ShadowFrame* shadow_frame,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
+ if (UNLIKELY(mh == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, mh.Ptr());
+ return false;
+}
+
+extern "C" size_t MterpConstMethodType(uint32_t index,
+ uint32_t tgt_vreg,
+ ShadowFrame* shadow_frame,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::MethodType> mt = ResolveMethodType(self, index, shadow_frame->GetMethod());
+ if (UNLIKELY(mt == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, mt.Ptr());
+ return false;
+}
+
extern "C" size_t MterpCheckCast(uint32_t index,
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
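The four new invoke wrappers added above differ only in which Do* helper they forward to and in the is_range template argument, and the two new const handlers differ only in the resolver they call. The sketch below shows that shared shape under stated assumptions: DoInvokeExample stands in for DoInvokeCustom/DoInvokePolymorphic and is not a real function; a zero return from the wrapper is what makes the assembly stub branch to MterpException.

// Illustrative only: a hypothetical consolidation of the wrapper shape used
// by the MterpInvokeCustom / MterpInvokePolymorphic additions above.
// DoInvokeExample is a stand-in name, not a function in this change.
template <bool is_range>
static size_t MterpInvokeExample(Thread* self,
                                 ShadowFrame* shadow_frame,
                                 uint16_t* dex_pc_ptr,
                                 uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  // The plain and /range flavors differ only in this template argument;
  // a zero (false) return tells the assembly stub to raise MterpException.
  return DoInvokeExample<is_range>(self, *shadow_frame, inst, inst_data, result_register);
}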
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index d6a27b8fc9..8ca5bd457b 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -338,8 +338,6 @@ unspecified registers or condition codes.
.text
.align 2
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
/*
* On entry:
@@ -398,7 +396,6 @@ ENTRY ExecuteMterpImpl
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -841,13 +838,18 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: arm/op_const_string.S */
- /* const/string vAA, String@BBBB */
+/* File: arm/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstString
EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
+ FETCH r0, 1 @ r0<- BBBB
mov r1, rINST, lsr #8 @ r1<- AA
add r2, rFP, #OFF_FP_SHADOWFRAME
mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2 @ load rINST
cmp r0, #0 @ fail?
bne MterpPossibleException @ let reference interpreter deal with it.
@@ -855,6 +857,7 @@ artMterpAsmInstructionStart = .L_op_nop
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -879,20 +882,26 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: arm/op_const_class.S */
- /* const/class vAA, Class@BBBB */
+/* File: arm/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstClass
EXPORT_PC
FETCH r0, 1 @ r0<- BBBB
mov r1, rINST, lsr #8 @ r1<- AA
add r2, rFP, #OFF_FP_SHADOWFRAME
mov r3, rSELF
- bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
+ bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -7335,55 +7344,169 @@ constvalop_long_to_double:
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm/op_invoke_polymorphic.S */
+/* File: arm/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokePolymorphic
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm/op_invoke_polymorphic_range.S */
+/* File: arm/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokePolymorphicRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm/op_invoke_custom.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeCustom
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /*
+ * Handle an invoke-custom invocation.
+ *
+ * for: invoke-custom, invoke-custom/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm/op_invoke_custom_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeCustomRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: arm/op_unused_fe.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: arm/op_const_method_handle.S */
+/* File: arm/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstMethodHandle @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: arm/op_unused_ff.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: arm/op_const_method_type.S */
+/* File: arm/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstMethodType @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7393,7 +7516,6 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
@@ -7459,14 +7581,11 @@ d2l_maybeNaN:
mov r0, #0
mov r1, #0
bx lr @ return 0 for NaN
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -11790,7 +11909,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11807,7 +11926,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: arm/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11823,7 +11942,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: arm/footer.S */
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 3d05996521..d4423ab0c4 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -338,6 +338,23 @@ codes.
.cfi_adjust_cfa_offset -(\frame_adjustment)
.endm
+/*
+ * cfi support macros.
+ */
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
/* File: arm64/entry.S */
/*
* Copyright (C) 2016 The Android Open Source Project
@@ -366,12 +383,7 @@ codes.
* x3 JValue* result_register
*
*/
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-
-ExecuteMterpImpl:
- .cfi_startproc
+ENTRY ExecuteMterpImpl
SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
SAVE_TWO_REGS xIBASE, xREFS, 16
SAVE_TWO_REGS xSELF, xINST, 32
@@ -413,7 +425,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -849,19 +860,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: arm64/op_const_string.S */
- /* const/string vAA, String//BBBB */
+/* File: arm64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstString
EXPORT_PC
FETCH w0, 1 // w0<- BBBB
lsr w1, wINST, #8 // w1<- AA
add x2, xFP, #OFF_FP_SHADOWFRAME
mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2 // load rINST
cbnz w0, MterpPossibleException // let reference interpreter deal with it.
ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -885,19 +902,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: arm64/op_const_class.S */
- /* const/class vAA, Class//BBBB */
+/* File: arm64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstClass
EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
+ FETCH w0, 1 // w0<- BBBB
lsr w1, wINST, #8 // w1<- AA
add x2, xFP, #OFF_FP_SHADOWFRAME
mov x3, xSELF
- bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
+ bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -6902,55 +6925,152 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm64/op_invoke_polymorphic.S */
+/* File: arm64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl MterpInvokePolymorphic
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm64/op_invoke_polymorphic_range.S */
+/* File: arm64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl MterpInvokePolymorphicRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm64/op_invoke_custom.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl MterpInvokeCustom
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: arm64/op_invoke_custom_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl MterpInvokeCustomRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: arm64/op_unused_fe.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: arm64/op_const_method_handle.S */
+/* File: arm64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstMethodHandle // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: arm64/op_unused_ff.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: arm64/op_const_method_type.S */
+/* File: arm64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstMethodType // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -6960,12 +7080,9 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
@@ -7277,13 +7394,9 @@ MterpProfileActive:
RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
ret
- .cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -11607,7 +11720,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11624,7 +11737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: arm64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11640,6 +11753,11 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
+/* File: arm64/close_cfi.S */
+// Close out the cfi info. We're treating mterp as a single function.
+
+END ExecuteMterpImpl
+
+
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 144c8e5165..e8308358f6 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -809,7 +809,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -1226,19 +1225,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: mips/op_const_string.S */
- /* const/string vAA, string@BBBB */
+/* File: mips/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstString
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
PREFETCH_INST(2) # load rINST
bnez v0, MterpPossibleException
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -1262,19 +1267,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: mips/op_const_class.S */
- /* const/class vAA, class@BBBB */
+/* File: mips/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstClass
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
move a3, rSELF
- JAL(MterpConstClass)
+ JAL(MterpConstClass) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
PREFETCH_INST(2) # load rINST
bnez v0, MterpPossibleException
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -7715,51 +7726,150 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips/op_invoke_polymorphic.S */
+/* File: mips/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokePolymorphic)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(4)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips/op_invoke_polymorphic_range.S */
+/* File: mips/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokePolymorphicRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(4)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips/op_invoke_custom.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeCustom)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips/op_invoke_custom_range.S */
+/* File: mips/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL(MterpInvokeCustomRange)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(3)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: mips/op_unused_fe.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: mips/op_const_method_handle.S */
+/* File: mips/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstMethodHandle) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: mips/op_unused_ff.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: mips/op_const_method_type.S */
+/* File: mips/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstMethodType) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7769,7 +7879,6 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
@@ -7829,14 +7938,11 @@ artMterpAsmSisterStart:
.Lop_ushr_long_2addr_finish:
SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -12414,7 +12520,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12432,7 +12538,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: mips/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12449,7 +12555,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: mips/footer.S */
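
The new invoke-custom and invoke-polymorphic handlers above are thin wrappers: they pass self, the shadow frame, the dex pc and the packed instruction word to a C++ helper, branch to MterpException if it reports a pending exception, advance the pc by 3 (custom) or 4 (polymorphic, which carries an extra proto@HHHH unit) code units, and then ask MterpShouldSwitchInterpreters whether to bail to MterpFallback. A self-contained C++ sketch of that control flow; the helpers here are placeholders, not the real MterpInvoke* entry points:

#include <cstdint>

struct ShadowFrameStub {};  // stand-in for the real shadow frame

// Placeholder for MterpInvokeCustom / MterpInvokePolymorphic: returns false
// when the invoke left an exception pending.
static bool InvokeHelper(void* /*self*/, ShadowFrameStub* /*frame*/,
                         const uint16_t* /*dex_pc*/, uint16_t /*inst_data*/) {
  return true;
}

// Placeholder for MterpShouldSwitchInterpreters: true means bail to MterpFallback.
static bool ShouldSwitch() { return false; }

// Returns the dex pc to dispatch next, or nullptr where the assembly branches
// to MterpException / MterpFallback instead.
const uint16_t* HandleInvokeLike(void* self, ShadowFrameStub* frame,
                                 const uint16_t* dex_pc, uint16_t inst_data,
                                 int width_in_code_units /* 3 for custom, 4 for polymorphic */) {
  if (!InvokeHelper(self, frame, dex_pc, inst_data)) {
    return nullptr;                   // beqz v0, MterpException
  }
  dex_pc += width_in_code_units;      // FETCH_ADVANCE_INST 3 / 4
  if (ShouldSwitch()) {
    return nullptr;                   // bnez v0, MterpFallback
  }
  return dex_pc;                      // GET_INST_OPCODE + GOTO_OPCODE
}
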
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 28f1887539..b6af04072b 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -21,17 +21,50 @@
* limitations under the License.
*/
-#include <machine/regdef.h>
-
-/* TODO: add the missing file and use its FP register definitions. */
-/* #include <machine/fpregdef.h> */
-/* FP register definitions */
-#define f0 $f0
-#define f1 $f1
-#define f2 $f2
-#define f3 $f3
-#define f12 $f12
-#define f13 $f13
+#define zero $0 /* always zero */
+#define AT $at /* assembler temp */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define a4 $8 /* expanded register arguments */
+#define a5 $9
+#define a6 $10
+#define a7 $11
+#define ta0 $8 /* alias */
+#define ta1 $9
+#define ta2 $10
+#define ta3 $11
+#define t0 $12 /* temp registers (not saved across subroutine calls) */
+#define t1 $13
+#define t2 $14
+#define t3 $15
+
+#define s0 $16 /* saved across subroutine calls (callee saved) */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* two more temp registers */
+#define t9 $25
+#define k0 $26 /* kernel temporary */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define s8 $30 /* one more callee saved */
+#define ra $31 /* return address */
+
+#define f0 $f0
+#define f1 $f1
+#define f2 $f2
+#define f3 $f3
+#define f12 $f12
+#define f13 $f13
/*
* It looks like the GNU assembler currently does not support the blec and bgtc
@@ -396,7 +429,6 @@ ExecuteMterpImpl:
.global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
artMterpAsmInstructionStart = .L_op_nop
.text
@@ -828,20 +860,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: mips64/op_const_string.S */
- /* const/string vAA, String//BBBB */
+/* File: mips64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
.extern MterpConstString
EXPORT_PC
lhu a0, 2(rPC) # a0 <- BBBB
srl a1, rINST, 8 # a1 <- AA
daddu a2, rFP, OFF_FP_SHADOWFRAME
move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2 # load rINST
bnez v0, MterpPossibleException # let reference interpreter deal with it.
ADVANCE 2 # advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -866,20 +903,25 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: mips64/op_const_class.S */
- /* const/class vAA, Class//BBBB */
+/* File: mips64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
.extern MterpConstClass
EXPORT_PC
lhu a0, 2(rPC) # a0 <- BBBB
srl a1, rINST, 8 # a1 <- AA
daddu a2, rFP, OFF_FP_SHADOWFRAME
move a3, rSELF
- jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
+ jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2 # load rINST
bnez v0, MterpPossibleException # let reference interpreter deal with it.
ADVANCE 2 # advance rPC
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -7106,51 +7148,154 @@ artMterpAsmInstructionStart = .L_op_nop
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips64/op_invoke_polymorphic.S */
+/* File: mips64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokePolymorphic
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 4
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips64/op_invoke_polymorphic_range.S */
+/* File: mips64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokePolymorphicRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 4
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips64/op_invoke_custom.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeCustom
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- b MterpFallback
+/* File: mips64/op_invoke_custom_range.S */
+/* File: mips64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal MterpInvokeCustomRange
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 3
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: mips64/op_unused_fe.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: mips64/op_const_method_handle.S */
+/* File: mips64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstMethodHandle # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: mips64/op_unused_ff.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: mips64/op_const_method_type.S */
+/* File: mips64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstMethodType # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
.global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:
@@ -7160,18 +7305,14 @@ artMterpAsmInstructionEnd:
* ===========================================================================
*/
.global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
.text
.balign 4
artMterpAsmSisterStart:
-
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
.global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
.text
artMterpAsmAltInstructionStart = .L_ALT_op_nop
@@ -12003,7 +12144,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12022,7 +12163,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: mips64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12040,7 +12181,6 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
/* File: mips64/footer.S */
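
The const-method-handle and const-method-type handlers (and the reworked const-string / const-class ones) all follow the same const.S shape: fetch the 16-bit BBBB index, pass it together with the destination register AA, the shadow frame and self to a helper, treat a non-zero return as a possible pending exception, and otherwise advance the pc by 2 code units. A self-contained C++ sketch of that shape; the helper name and signature are assumptions, the real MterpConst* helpers live in the runtime:

#include <cstdint>

struct ShadowFrameStub {};  // stand-in for the real shadow frame

// Placeholder for MterpConstMethodHandle / MterpConstMethodType / MterpConstString /
// MterpConstClass: resolves the constant for `index`, stores it into vreg `tgt_reg`,
// and returns true if an exception is now pending.
static bool ConstHelper(uint32_t /*index*/, uint32_t /*tgt_reg*/,
                        ShadowFrameStub* /*frame*/, void* /*self*/) {
  return false;
}

// Returns the dex pc to dispatch next, or nullptr where the assembly branches
// to MterpPossibleException.
const uint16_t* HandleConstLike(ShadowFrameStub* frame, void* self,
                                const uint16_t* dex_pc, uint16_t inst_data) {
  const uint32_t index = dex_pc[1];         // FETCH 1: the BBBB code unit
  const uint32_t tgt_reg = inst_data >> 8;  // AA: destination register
  if (ConstHelper(index, tgt_reg, frame, self)) {
    return nullptr;                         // bnez v0, MterpPossibleException
  }
  return dex_pc + 2;                        // ADVANCE 2, then dispatch the next opcode
}
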
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index 169501d563..514ecacb05 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -401,7 +401,6 @@ SYMBOL(ExecuteMterpImpl):
.global SYMBOL(artMterpAsmInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -783,7 +782,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: x86/op_const_string.S */
- /* const/string vAA, String@BBBB */
+/* File: x86/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstString
EXPORT_PC
movzwl 2(rPC), %eax # eax <- BBBB
movl %eax, OUT_ARG0(%esp)
@@ -792,12 +796,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
RESTORE_IBASE
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -821,21 +826,27 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: x86/op_const_class.S */
- /* const/class vAA, Class@BBBB */
+/* File: x86/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstClass
EXPORT_PC
- movzwl 2(rPC), %eax # eax<- BBBB
+ movzwl 2(rPC), %eax # eax <- BBBB
movl %eax, OUT_ARG0(%esp)
movl rINST, OUT_ARG1(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
RESTORE_IBASE
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -6281,55 +6292,178 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86/op_invoke_polymorphic.S */
+/* File: x86/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST 250
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL(MterpInvokePolymorphic)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86/op_invoke_polymorphic_range.S */
+/* File: x86/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST 251
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL(MterpInvokePolymorphicRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86/op_invoke_custom.S */
+/* File: x86/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST 252
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL(MterpInvokeCustom)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86/op_invoke_custom_range.S */
+/* File: x86/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST 253
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL(MterpInvokeCustomRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: x86/op_unused_fe.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: x86/op_const_method_handle.S */
+/* File: x86/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp)
+ call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: x86/op_unused_ff.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: x86/op_const_method_type.S */
+/* File: x86/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp)
+ call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.balign 128
- SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6339,18 +6473,14 @@ SYMBOL(artMterpAsmInstructionEnd):
* ===========================================================================
*/
.global SYMBOL(artMterpAsmSisterStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
.text
.balign 4
SYMBOL(artMterpAsmSisterStart):
-
- SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
.global SYMBOL(artMterpAsmAltInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -12452,7 +12582,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12476,7 +12606,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: x86/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -12499,7 +12629,6 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
jmp .L_op_nop+(255*128)
.balign 128
- SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
/* File: x86/footer.S */
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index b643072dde..cfee2b8f84 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -383,7 +383,6 @@ SYMBOL(ExecuteMterpImpl):
.global SYMBOL(artMterpAsmInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
@@ -739,17 +738,23 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
.L_op_const_string: /* 0x1a */
/* File: x86_64/op_const_string.S */
- /* const/string vAA, String@BBBB */
+/* File: x86_64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstString
EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
movq rINSTq, OUT_ARG1
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
/* ------------------------------ */
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
@@ -769,17 +774,23 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.balign 128
.L_op_const_class: /* 0x1c */
/* File: x86_64/op_const_class.S */
- /* const/class vAA, Class@BBBB */
+/* File: x86_64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstClass
EXPORT_PC
movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
movq rINSTq, OUT_ARG1
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
testb %al, %al
jnz MterpPossibleException
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
/* ------------------------------ */
.balign 128
.L_op_monitor_enter: /* 0x1d */
@@ -6048,55 +6059,158 @@ movswl %ax, %eax
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic: /* 0xfa */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86_64/op_invoke_polymorphic.S */
+/* File: x86_64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphic
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 250
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokePolymorphic)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_polymorphic_range: /* 0xfb */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86_64/op_invoke_polymorphic_range.S */
+/* File: x86_64/invoke_polymorphic.S */
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern MterpInvokePolymorphicRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 251
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokePolymorphicRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_custom: /* 0xfc */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86_64/op_invoke_custom.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustom
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 252
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeCustom)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
.L_op_invoke_custom_range: /* 0xfd */
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
+/* File: x86_64/op_invoke_custom_range.S */
+/* File: x86_64/invoke.S */
+/*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeCustomRange
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST 253
+ movl rINST, OUT_32_ARG3
+ call SYMBOL(MterpInvokeCustomRange)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 3
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
/* ------------------------------ */
.balign 128
-.L_op_unused_fe: /* 0xfe */
-/* File: x86_64/op_unused_fe.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
+.L_op_const_method_handle: /* 0xfe */
+/* File: x86_64/op_const_method_handle.S */
+/* File: x86_64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodHandle
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
.balign 128
-.L_op_unused_ff: /* 0xff */
-/* File: x86_64/op_unused_ff.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
+.L_op_const_method_type: /* 0xff */
+/* File: x86_64/op_const_method_type.S */
+/* File: x86_64/const.S */
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern MterpConstMethodType
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.balign 128
- SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
.global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):
@@ -6106,18 +6220,14 @@ SYMBOL(artMterpAsmInstructionEnd):
* ===========================================================================
*/
.global SYMBOL(artMterpAsmSisterStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
.text
.balign 4
SYMBOL(artMterpAsmSisterStart):
-
- SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
.global SYMBOL(artMterpAsmSisterEnd)
SYMBOL(artMterpAsmSisterEnd):
.global SYMBOL(artMterpAsmAltInstructionStart)
- FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
.text
SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
@@ -11711,7 +11821,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_fe: /* 0xfe */
+.L_ALT_op_const_method_handle: /* 0xfe */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11733,7 +11843,7 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
-.L_ALT_op_unused_ff: /* 0xff */
+.L_ALT_op_const_method_type: /* 0xff */
/* File: x86_64/alt_stub.S */
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
@@ -11754,7 +11864,6 @@ SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
jmp .L_op_nop+(255*128)
.balign 128
- SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
.global SYMBOL(artMterpAsmAltInstructionEnd)
SYMBOL(artMterpAsmAltInstructionEnd):
/* File: x86_64/footer.S */
diff --git a/runtime/interpreter/mterp/x86/const.S b/runtime/interpreter/mterp/x86/const.S
new file mode 100644
index 0000000000..f0cac1a19b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/const.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp)
+ call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/invoke_polymorphic.S b/runtime/interpreter/mterp/x86/invoke_polymorphic.S
new file mode 100644
index 0000000000..5690b22028
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/invoke_polymorphic.S
@@ -0,0 +1,25 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/op_const_class.S b/runtime/interpreter/mterp/x86/op_const_class.S
index 60be789214..71648b5df7 100644
--- a/runtime/interpreter/mterp/x86/op_const_class.S
+++ b/runtime/interpreter/mterp/x86/op_const_class.S
@@ -1,14 +1 @@
- /* const/class vAA, Class@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+%include "x86/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86/op_const_method_handle.S b/runtime/interpreter/mterp/x86/op_const_method_handle.S
new file mode 100644
index 0000000000..77948fd8f9
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "x86/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86/op_const_method_type.S b/runtime/interpreter/mterp/x86/op_const_method_type.S
new file mode 100644
index 0000000000..03c6ce5350
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_const_method_type.S
@@ -0,0 +1 @@
+%include "x86/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86/op_const_string.S b/runtime/interpreter/mterp/x86/op_const_string.S
index ff93b232d6..5553aab557 100644
--- a/runtime/interpreter/mterp/x86/op_const_string.S
+++ b/runtime/interpreter/mterp/x86/op_const_string.S
@@ -1,14 +1 @@
- /* const/string vAA, String@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+%include "x86/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom.S b/runtime/interpreter/mterp/x86/op_invoke_custom.S
new file mode 100644
index 0000000000..eddd5b33a3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_invoke_custom.S
@@ -0,0 +1 @@
+%include "x86/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
new file mode 100644
index 0000000000..1a4e884166
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "x86/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..3907689476
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..59a823076d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86/op_unused_fe.S b/runtime/interpreter/mterp/x86/op_unused_fe.S
deleted file mode 100644
index 31d98c1f39..0000000000
--- a/runtime/interpreter/mterp/x86/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_ff.S b/runtime/interpreter/mterp/x86/op_unused_ff.S
deleted file mode 100644
index 31d98c1f39..0000000000
--- a/runtime/interpreter/mterp/x86/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/const.S b/runtime/interpreter/mterp/x86_64/const.S
new file mode 100644
index 0000000000..1ddf20fdca
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/const.S
@@ -0,0 +1,15 @@
+%default { "helper":"UndefinedConstHandler" }
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
new file mode 100644
index 0000000000..5157860b37
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
@@ -0,0 +1,22 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG3
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
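
In these templates, $helper and ${opnum} are substituted when the out/ files are regenerated, which is why the expanded x86 and x86_64 handlers earlier in this diff read REFRESH_INST 250 through REFRESH_INST 253. For reference, the numeric values behind the new opcodes, as a small, purely illustrative C++ enum:

#include <cstdint>

enum NewDexOpcodes : uint8_t {
  kInvokePolymorphic      = 0xfa,  // 250
  kInvokePolymorphicRange = 0xfb,  // 251
  kInvokeCustom           = 0xfc,  // 252
  kInvokeCustomRange      = 0xfd,  // 253
  kConstMethodHandle      = 0xfe,  // 254
  kConstMethodType        = 0xff,  // 255
};
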
diff --git a/runtime/interpreter/mterp/x86_64/op_const_class.S b/runtime/interpreter/mterp/x86_64/op_const_class.S
index 494920a4a8..0c402e1489 100644
--- a/runtime/interpreter/mterp/x86_64/op_const_class.S
+++ b/runtime/interpreter/mterp/x86_64/op_const_class.S
@@ -1,10 +1 @@
- /* const/class vAA, Class@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+%include "x86_64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_handle.S b/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
new file mode 100644
index 0000000000..2b8b0a258a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
@@ -0,0 +1 @@
+%include "x86_64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_type.S b/runtime/interpreter/mterp/x86_64/op_const_method_type.S
new file mode 100644
index 0000000000..33ce952031
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_const_method_type.S
@@ -0,0 +1 @@
+%include "x86_64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string.S b/runtime/interpreter/mterp/x86_64/op_const_string.S
index 7c199ecad9..5a29bd3dde 100644
--- a/runtime/interpreter/mterp/x86_64/op_const_string.S
+++ b/runtime/interpreter/mterp/x86_64/op_const_string.S
@@ -1,10 +1 @@
- /* const/string vAA, String@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+%include "x86_64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
new file mode 100644
index 0000000000..f4011f6d86
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
new file mode 100644
index 0000000000..94612c47d5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
new file mode 100644
index 0000000000..452944536d
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
@@ -0,0 +1 @@
+%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
new file mode 100644
index 0000000000..01981c1b49
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
@@ -0,0 +1 @@
+%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fe.S b/runtime/interpreter/mterp/x86_64/op_unused_fe.S
deleted file mode 100644
index 280615f08b..0000000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fe.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_ff.S b/runtime/interpreter/mterp/x86_64/op_unused_ff.S
deleted file mode 100644
index 280615f08b..0000000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_ff.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index ce06a036ec..7a8ae9a9db 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -47,7 +47,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "nth_caller_visitor.h"
#include "reflection.h"
#include "thread-inl.h"
@@ -1751,12 +1751,6 @@ void UnstartedRuntime::UnstartedJNIStringIntern(
result->SetL(receiver->AsString()->Intern());
}
-void UnstartedRuntime::UnstartedJNIStringFastIndexOf(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
- uint32_t* args, JValue* result) {
- result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
-}
-
void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 47910357d5..e7047c7372 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -94,7 +94,6 @@
V(ObjectNotifyAll, "void java.lang.Object.notifyAll()") \
V(StringCompareTo, "int java.lang.String.compareTo(java.lang.String)") \
V(StringIntern, "java.lang.String java.lang.String.intern()") \
- V(StringFastIndexOf, "int java.lang.String.fastIndexOf(int, int)") \
V(ArrayCreateMultiArray, "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") \
V(ArrayCreateObjectArray, "java.lang.Object java.lang.reflect.Array.createObjectArray(java.lang.Class, int)") \
V(ThrowableNativeFillInStackTrace, "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") \
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 73746e18ef..f8b82ed313 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -36,8 +36,8 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "nativebridge/native_bridge.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nativeloader/native_loader.h"
#include "object_callbacks.h"
#include "parsed_options.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 97a3b717e2..72b5a942fe 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -272,9 +272,12 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
+ RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Don't compile the method if it has breakpoints.
- if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
- VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to breakpoint";
+ if (cb->IsMethodBeingInspected(method) && !cb->IsMethodSafeToJit(method)) {
+ VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
+ << " due to not being safe to jit according to runtime-callbacks. For example, there"
+ << " could be breakpoints in this method.";
return false;
}
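
The breakpoint check is now routed through RuntimeCallbacks instead of asking the debugger directly, so any inspection agent can veto JIT compilation of a method. The call sites above only imply the interface; a minimal sketch of what such a callback could look like (shape and names assumed, not the actual runtime declaration):

class ArtMethod;

class MethodInspectionCallbackSketch {
 public:
  virtual ~MethodInspectionCallbackSketch() {}
  // True if some agent (e.g. a debugger) is currently inspecting the method.
  virtual bool IsMethodBeingInspected(ArtMethod* method) = 0;
  // True if JIT-compiling the method is still safe despite the inspection,
  // e.g. because it has no breakpoints set.
  virtual bool IsMethodSafeToJit(ArtMethod* method) = 0;
};
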
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d74cec325a..5164c85b60 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -49,7 +49,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 3f00450319..1ecfe7cb76 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -24,7 +24,7 @@
#include "java_vm_ext.h"
#include "jni_env_ext.h"
#include "mirror/string-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index a3d90355ac..8198636b3d 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -148,12 +148,18 @@ TEST_F(DexCacheMethodHandlesTest, TestResolvedMethodTypes) {
const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
-
Handle<mirror::MethodType> method1_type = hs.NewHandle(
- class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+ class_linker_->ResolveMethodType(soa.Self(),
+ dex_file,
+ method1_id.proto_idx_,
+ dex_cache,
+ class_loader));
Handle<mirror::MethodType> method2_type = hs.NewHandle(
- class_linker_->ResolveMethodType(dex_file, method2_id.proto_idx_, dex_cache, class_loader));
-
+ class_linker_->ResolveMethodType(soa.Self(),
+ dex_file,
+ method2_id.proto_idx_,
+ dex_cache,
+ class_loader));
EXPECT_EQ(method1_type.Get(), dex_cache->GetResolvedMethodType(method1_id.proto_idx_));
EXPECT_EQ(method2_type.Get(), dex_cache->GetResolvedMethodType(method2_id.proto_idx_));
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 7823413c09..32201d97dd 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -44,7 +44,8 @@ namespace art {
using android::base::StringPrintf;
-static constexpr uint64_t kLongWaitMs = 100;
+static constexpr uint64_t kDebugThresholdFudgeFactor = kIsDebugBuild ? 10 : 1;
+static constexpr uint64_t kLongWaitMs = 100 * kDebugThresholdFudgeFactor;
/*
* Every Object has a monitor associated with it, but not every Object is actually locked. Even
@@ -78,8 +79,12 @@ uint32_t Monitor::stack_dump_lock_profiling_threshold_ = 0;
void Monitor::Init(uint32_t lock_profiling_threshold,
uint32_t stack_dump_lock_profiling_threshold) {
- lock_profiling_threshold_ = lock_profiling_threshold;
- stack_dump_lock_profiling_threshold_ = stack_dump_lock_profiling_threshold;
+ // It isn't great to always include the debug build fudge factor for command-
+ // line driven arguments, but it's easier to adjust here than in the build.
+ lock_profiling_threshold_ =
+ lock_profiling_threshold * kDebugThresholdFudgeFactor;
+ stack_dump_lock_profiling_threshold_ =
+ stack_dump_lock_profiling_threshold * kDebugThresholdFudgeFactor;
}
Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
@@ -453,7 +458,7 @@ void Monitor::Lock(Thread* self) {
// Acquire thread-list lock to find thread and keep it from dying until we've got all
// the info we need.
{
- MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
+ Locks::thread_list_lock_->ExclusiveLock(Thread::Current());
// Re-find the owner in case the thread got killed.
Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
@@ -475,9 +480,15 @@ void Monitor::Lock(Thread* self) {
std::ostringstream oss;
};
CollectStackTrace owner_trace;
+ // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
+ // execution.
original_owner->RequestSynchronousCheckpoint(&owner_trace);
owner_stack_dump = owner_trace.oss.str();
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
+ } else {
+ Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
}
// This is all the data we need. Now drop the thread-list lock, it's OK for the
// owner to go away now.
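
Two things change in monitor.cc: the contended-lock diagnostics now take thread_list_lock_ explicitly and hand it off to RequestSynchronousCheckpoint (which releases it as part of its execution), and every lock-profiling threshold is scaled by a debug-build fudge factor so slower debug builds do not trip the warnings spuriously. The arithmetic of that fudge factor, as a tiny self-contained C++ check (example names, mirroring the constants above):

#include <cstdint>

constexpr bool kIsDebugBuildExample = true;                 // stand-in for kIsDebugBuild
constexpr uint64_t kFudge = kIsDebugBuildExample ? 10 : 1;  // kDebugThresholdFudgeFactor
constexpr uint64_t kLongWaitMsExample = 100 * kFudge;       // 1000 ms in debug, 100 ms in release
static_assert(kLongWaitMsExample == 1000, "debug builds wait 10x longer before warning");
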
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index e75d097220..4ab8908ff3 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -33,8 +33,8 @@
#include "mirror/string.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "oat_file_manager.h"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 70dd5cb56d..2663bea344 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -40,8 +40,8 @@
#include "mirror/class.h"
#include "mirror/object_array-inl.h"
#include "native_util.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "scoped_fast_native_object_access-inl.h"
#include "trace.h"
#include "well_known_classes.h"
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4db9feb518..0bbd1ece80 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -22,12 +22,7 @@
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
#include <limits.h>
-#include "nativehelper/ScopedUtfChars.h"
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "nativehelper/toStringArray.h"
-#pragma GCC diagnostic pop
+#include "nativehelper/scoped_utf_chars.h"
#include "android-base/stringprintf.h"
@@ -53,11 +48,13 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "mirror/object-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
+#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread_list.h"
+#include "well_known_classes.h"
namespace art {
@@ -166,7 +163,27 @@ static jboolean VMRuntime_isNativeDebuggable(JNIEnv*, jobject) {
}
static jobjectArray VMRuntime_properties(JNIEnv* env, jobject) {
- return toStringArray(env, Runtime::Current()->GetProperties());
+ DCHECK(WellKnownClasses::java_lang_String != nullptr);
+
+ const std::vector<std::string>& properties = Runtime::Current()->GetProperties();
+ ScopedLocalRef<jobjectArray> ret(env,
+ env->NewObjectArray(static_cast<jsize>(properties.size()),
+ WellKnownClasses::java_lang_String,
+ nullptr /* initial element */));
+ if (ret == nullptr) {
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
+ }
+ for (size_t i = 0; i != properties.size(); ++i) {
+ ScopedLocalRef<jstring> str(env, env->NewStringUTF(properties[i].c_str()));
+ if (str == nullptr) {
+ DCHECK(env->ExceptionCheck());
+ return nullptr;
+ }
+ env->SetObjectArrayElement(ret.get(), static_cast<jsize>(i), str.get());
+ DCHECK(!env->ExceptionCheck());
+ }
+ return ret.release();
}
// This is for backward compatibility with dalvik which returned the
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e40a071223..b436e0df67 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -22,14 +22,14 @@
#include "arch/instruction_set.h"
#include "art_method-inl.h"
+#include "base/logging.h"
#include "debugger.h"
#include "java_vm_ext.h"
#include "jit/jit.h"
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/JNIHelp.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "non_debuggable_classes.h"
#include "oat_file.h"
#include "oat_file_manager.h"
@@ -49,7 +49,8 @@ namespace art {
// Set to true to always determine the non-debuggable classes even if we would not allow a debugger
// to actually attach.
-static constexpr bool kAlwaysCollectNonDebuggableClasses = kIsDebugBuild;
+static bool kAlwaysCollectNonDebuggableClasses =
+ RegisterRuntimeDebugFlag(&kAlwaysCollectNonDebuggableClasses);
using android::base::StringPrintf;
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1a19940993..9359ffc7fd 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -35,8 +35,8 @@
#include "mirror/string-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nth_caller_visitor.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index e2de141808..9295ff7071 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
#include "mirror/string-inl.h"
#include "mirror/string.h"
#include "native_util.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "verify_object.h"
@@ -68,13 +68,6 @@ static jstring String_concat(JNIEnv* env, jobject java_this, jobject java_string
return reinterpret_cast<jstring>(string_original);
}
-static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint start) {
- ScopedFastNativeObjectAccess soa(env);
- // This method does not handle supplementary characters. They're dealt with in managed code.
- DCHECK_LE(ch, 0xffff);
- return soa.Decode<mirror::String>(java_this)->FastIndexOf(ch, start);
-}
-
static jstring String_fastSubstring(JNIEnv* env, jobject java_this, jint start, jint length) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
@@ -121,7 +114,6 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(String, compareTo, "(Ljava/lang/String;)I"),
FAST_NATIVE_METHOD(String, concat, "(Ljava/lang/String;)Ljava/lang/String;"),
FAST_NATIVE_METHOD(String, doReplace, "(CC)Ljava/lang/String;"),
- FAST_NATIVE_METHOD(String, fastIndexOf, "(II)I"),
FAST_NATIVE_METHOD(String, fastSubstring, "(II)Ljava/lang/String;"),
FAST_NATIVE_METHOD(String, getCharsNoCheck, "(II[CI)V"),
FAST_NATIVE_METHOD(String, intern, "()Ljava/lang/String;"),
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 2db9a5cc22..136a02f8f6 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -22,8 +22,8 @@
#include "mirror/string.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 94007ffa1e..a717264bcb 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -22,7 +22,7 @@
#include "monitor.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 413149c510..5130ad50e4 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -23,8 +23,8 @@
#include "mirror/object-inl.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "obj_ptr.h"
#include "scoped_fast_native_object_access-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index 9743c9413d..f3aba2575b 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -22,7 +22,7 @@
#include "mirror/string-inl.h"
#include "mirror/string.h"
#include "native_util.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "nativehelper/jni_macros.h"
#include "scoped_fast_native_object_access-inl.h"
#include "unicode/utf16.h"
diff --git a/runtime/native/native_util.h b/runtime/native/native_util.h
index 593b3ca444..784dba319e 100644
--- a/runtime/native/native_util.h
+++ b/runtime/native/native_util.h
@@ -21,7 +21,7 @@
#include "android-base/logging.h"
#include "base/macros.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
namespace art {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index c3e74bd112..f8f4b1f0ad 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -21,7 +21,7 @@
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
namespace art {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 8c42973509..dd98e25932 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -23,8 +23,8 @@
#include "jni_internal.h"
#include "native_util.h"
#include "nativehelper/jni_macros.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_primitive_array.h"
#include "scoped_fast_native_object_access-inl.h"
#include "thread_list.h"
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index 871ffba2a4..7db199cd06 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -19,7 +19,7 @@
#include "base/logging.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "thread-current-inl.h"
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index b3c3aa05f3..1269dcad93 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -77,9 +77,6 @@ static constexpr bool kUseDlopenOnHost = true;
// For debugging, Open will print DlOpen error message if set to true.
static constexpr bool kPrintDlOpenErrorMessage = false;
-// If true, we advise the kernel about dex file mem map accesses.
-static constexpr bool kMadviseDexFileAccesses = true;
-
// Note for OatFileBase and descendents:
//
// These are used in OatFile::Open to try all our loaders.
@@ -555,7 +552,7 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
const uint8_t* dex_file_pointer = DexBegin() + dex_file_offset;
- const bool valid_magic = StandardDexFile::IsMagicValid(dex_file_pointer);
+ const bool valid_magic = DexFileLoader::IsMagicValid(dex_file_pointer);
if (UNLIKELY(!valid_magic)) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
"dex file magic '%s'",
@@ -565,7 +562,7 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
dex_file_pointer);
return false;
}
- if (UNLIKELY(!StandardDexFile::IsVersionValid(dex_file_pointer))) {
+ if (UNLIKELY(!DexFileLoader::IsVersionAndMagicValid(dex_file_pointer))) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
"dex file version '%s'",
GetLocation().c_str(),
@@ -1651,20 +1648,19 @@ const DexFile::ClassDef* OatFile::OatDexFile::FindClassDef(const DexFile& dex_fi
// Madvise the dex file based on the state we are moving to.
void OatDexFile::MadviseDexFile(const DexFile& dex_file, MadviseState state) {
- const bool low_ram = Runtime::Current()->GetHeap()->IsLowMemoryMode();
+ Runtime* const runtime = Runtime::Current();
+ const bool low_ram = runtime->GetHeap()->IsLowMemoryMode();
// TODO: Also do madvise hints for non low ram devices.
- if (!kMadviseDexFileAccesses || !low_ram) {
+ if (!low_ram) {
return;
}
- if (state == MadviseState::kMadviseStateAtLoad) {
- if (low_ram) {
- // Default every dex file to MADV_RANDOM when its loaded by default for low ram devices.
- // Other devices have enough page cache to get performance benefits from loading more pages
- // into the page cache.
- MadviseLargestPageAlignedRegion(dex_file.Begin(),
- dex_file.Begin() + dex_file.Size(),
- MADV_RANDOM);
- }
+ if (state == MadviseState::kMadviseStateAtLoad && runtime->MAdviseRandomAccess()) {
+    // Default every dex file to MADV_RANDOM when it is loaded, for low-RAM devices.
+ // Other devices have enough page cache to get performance benefits from loading more pages
+ // into the page cache.
+ MadviseLargestPageAlignedRegion(dex_file.Begin(),
+ dex_file.Begin() + dex_file.Size(),
+ MADV_RANDOM);
}
const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file != nullptr) {
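
Note: MadviseLargestPageAlignedRegion is an ART helper; as a rough standalone illustration of the system call it wraps (assuming a plain read-only file mapping, outside of ART), advising a mapped dex file for random access looks roughly like this:

    // Minimal standalone sketch (not ART code): map a file and advise the kernel
    // that accesses will be random, as MadviseDexFile() does for low-RAM devices.
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      int fd = open(argv[1], O_RDONLY);
      if (fd < 0) { perror("open"); return 1; }
      struct stat st;
      if (fstat(fd, &st) != 0) { perror("fstat"); return 1; }
      void* base = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
      if (base == MAP_FAILED) { perror("mmap"); return 1; }
      // madvise() needs a page-aligned start address; mmap() already returns one.
      if (madvise(base, st.st_size, MADV_RANDOM) != 0) {
        perror("madvise");
      }
      munmap(base, st.st_size);
      close(fd);
      return 0;
    }
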
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 1e7cf723dc..940195c0f4 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -597,8 +597,12 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(
- dex_location, dex_location, kVerifyChecksum, /*out*/ &error_msg, &dex_files)) {
+ if (!DexFileLoader::Open(dex_location,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ /*out*/ &error_msg,
+ &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
+ " because: " + error_msg);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9888186ed0..71d7b6c34d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -121,7 +121,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.WithType<double>().WithRange(0.1, 0.9)
.IntoKey(M::HeapTargetUtilization)
.Define("-XX:ForegroundHeapGrowthMultiplier=_")
- .WithType<double>().WithRange(0.1, 1.0)
+ .WithType<double>().WithRange(0.1, 5.0)
.IntoKey(M::ForegroundHeapGrowthMultiplier)
.Define("-XX:ParallelGCThreads=_")
.WithType<unsigned int>()
@@ -159,6 +159,10 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(M::DumpNativeStackOnSigQuit)
+ .Define("-XX:MadviseRandomAccess:_")
+ .WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(M::MadviseRandomAccess)
.Define("-Xusejit:_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
@@ -717,6 +721,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:LargeObjectSpace={disabled,map,freelist}\n");
UsageMessage(stream, " -XX:LargeObjectThreshold=N\n");
UsageMessage(stream, " -XX:DumpNativeStackOnSigQuit=booleanvalue\n");
+ UsageMessage(stream, " -XX:MadviseRandomAccess:booleanvalue\n");
UsageMessage(stream, " -XX:SlowDebug={false,true}\n");
UsageMessage(stream, " -Xmethod-trace\n");
UsageMessage(stream, " -Xmethod-trace-file:filename");
diff --git a/runtime/plugin.cc b/runtime/plugin.cc
index 731967c738..6aa078771b 100644
--- a/runtime/plugin.cc
+++ b/runtime/plugin.cc
@@ -74,10 +74,8 @@ bool Plugin::Unload() {
LOG(WARNING) << this << " does not include a deinitialization function";
}
dlopen_handle_ = nullptr;
- if (dlclose(handle) != 0) {
- LOG(ERROR) << this << " failed to dlclose: " << dlerror();
- ret = false;
- }
+ // Don't bother to actually dlclose since we are shutting down anyway and there might be small
+ // amounts of processing still being done.
return ret;
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f28f0cabe2..9683cedd4d 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -28,7 +28,7 @@
#include "mirror/class-inl.h"
#include "mirror/executable.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change-inl.h"
#include "stack_reference.h"
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index fa2f1e5793..7794872c83 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -24,7 +24,7 @@
#include "common_compiler_test.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a4ed21e450..c88799cc28 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -134,9 +134,7 @@
#include "native/sun_misc_Unsafe.h"
#include "native_bridge_art_interface.h"
#include "native_stack_dump.h"
-#include "nativehelper/JniConstants.h"
-#include "nativehelper/JniConstants-priv.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "object_callbacks.h"
@@ -174,6 +172,11 @@ static constexpr double kLowMemoryMinLoadFactor = 0.5;
static constexpr double kLowMemoryMaxLoadFactor = 0.8;
static constexpr double kNormalMinLoadFactor = 0.4;
static constexpr double kNormalMaxLoadFactor = 0.7;
+
+// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read
+// barrier config.
+static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
+
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -407,10 +410,6 @@ Runtime::~Runtime() {
// instance. We rely on a small initialization order issue in Runtime::Start() that requires
// elements of WellKnownClasses to be null, see b/65500943.
WellKnownClasses::Clear();
-
- // Ensure that libnativehelper caching is invalidated, in case a new runtime is to be brought
- // up later.
- android::ClearJniConstantsCache();
}
struct AbortState {
@@ -508,6 +507,10 @@ void Runtime::Abort(const char* msg) {
UNUSED(old_value);
#endif
+#ifdef ART_TARGET_ANDROID
+ android_set_abort_message(msg);
+#endif
+
// Ensure that we don't have multiple threads trying to abort at once,
// which would result in significantly worse diagnostics.
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
@@ -1021,7 +1024,12 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFileLoader::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
+ if (!DexFileLoader::Open(dex_filename,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ &error_msg,
+ dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
@@ -1143,6 +1151,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
+ madvise_random_access_ = runtime_options.GetOrDefault(Opt::MadviseRandomAccess);
plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
agents_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
@@ -1151,13 +1160,22 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// agents_.push_back(lib);
// }
+ float foreground_heap_growth_multiplier;
+ if (is_low_memory_mode_ && !runtime_options.Exists(Opt::ForegroundHeapGrowthMultiplier)) {
+ // If low memory mode, use 1.0 as the multiplier by default.
+ foreground_heap_growth_multiplier = 1.0f;
+ } else {
+ foreground_heap_growth_multiplier =
+ runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) +
+ kExtraDefaultHeapGrowthMultiplier;
+ }
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
runtime_options.GetOrDefault(Opt::HeapMinFree),
runtime_options.GetOrDefault(Opt::HeapMaxFree),
runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
- runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier),
+ foreground_heap_growth_multiplier,
runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
runtime_options.GetOrDefault(Opt::Image),
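
Note: as a quick sanity check of the multiplier selection above, the following standalone sketch mirrors the logic with a made-up baseline of 2.0 standing in for the option's default (the real default value is not shown in this hunk):

    // Sketch of the selection logic added above, with hypothetical inputs.
    #include <cassert>

    constexpr bool kUseReadBarrier = true;  // build-time config (assumed here)
    constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;

    double EffectiveMultiplier(bool low_memory, bool option_set, double option_value) {
      if (low_memory && !option_set) {
        return 1.0;                                             // low-RAM default
      }
      return option_value + kExtraDefaultHeapGrowthMultiplier;  // normal path
    }

    int main() {
      assert(EffectiveMultiplier(/*low_memory=*/true,  /*option_set=*/false, 2.0) == 1.0);
      assert(EffectiveMultiplier(/*low_memory=*/false, /*option_set=*/false, 2.0) == 3.0);
    }
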
@@ -1533,11 +1551,7 @@ void Runtime::InitNativeMethods() {
// Must be in the kNative state for calling native methods (JNI_OnLoad code).
CHECK_EQ(self->GetState(), kNative);
- // First set up JniConstants, which is used by both the runtime's built-in native
- // methods and libcore.
- JniConstants::init(env);
-
- // Then set up the native methods provided by the runtime itself.
+ // Set up the native methods provided by the runtime itself.
RegisterRuntimeNativeMethods(env);
// Initialize classes used in JNI. The initialization requires runtime native
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 399e1c1622..9f79a01aa8 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -682,6 +682,12 @@ class Runtime {
return result;
}
+ // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
+ // This is beneficial for low RAM devices since it reduces page cache thrashing.
+ bool MAdviseRandomAccess() const {
+ return madvise_random_access_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -916,6 +922,10 @@ class Runtime {
// Whether or not we are on a low RAM device.
bool is_low_memory_mode_;
+ // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
+ // This is beneficial for low RAM devices since it reduces page cache thrashing.
+ bool madvise_random_access_;
+
// Whether the application should run in safe mode, that is, interpreter only.
bool safe_mode_;
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index f164f7c8ec..339fe822fd 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -43,6 +43,17 @@ void RuntimeCallbacks::RemoveMethodInspectionCallback(MethodInspectionCallback*
Remove(cb, &method_inspection_callbacks_);
}
+bool RuntimeCallbacks::IsMethodSafeToJit(ArtMethod* m) {
+ for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
+ if (!cb->IsMethodSafeToJit(m)) {
+ DCHECK(cb->IsMethodBeingInspected(m))
+ << "Contract requires that !IsMethodSafeToJit(m) -> IsMethodBeingInspected(m)";
+ return false;
+ }
+ }
+ return true;
+}
+
bool RuntimeCallbacks::IsMethodBeingInspected(ArtMethod* m) {
for (MethodInspectionCallback* cb : method_inspection_callbacks_) {
if (cb->IsMethodBeingInspected(m)) {
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index c9360491bb..c1ba9643a7 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -104,6 +104,11 @@ class MethodInspectionCallback {
// Returns true if the method is being inspected currently and the runtime should not modify it in
// potentially dangerous ways (i.e. replace with compiled version, JIT it, etc).
virtual bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  // Returns true if the method is safe to JIT, false otherwise.
+  // Note that '!IsMethodSafeToJit(m)' implies 'IsMethodBeingInspected(m)'; that is, if this
+  // method returns false then IsMethodBeingInspected must return true.
+ virtual bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
class RuntimeCallbacks {
@@ -167,6 +172,11 @@ class RuntimeCallbacks {
// on by some code.
bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns false if some MethodInspectionCallback indicates the method cannot safely be JITed
+  // (which implies that it is being inspected). Returns true otherwise. If it returns false, the
+  // entrypoint should not be changed to JITed code.
+ bool IsMethodSafeToJit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
void AddMethodInspectionCallback(MethodInspectionCallback* cb)
REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveMethodInspectionCallback(MethodInspectionCallback* cb)
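
Note: a minimal standalone sketch of the contract documented above, with a plain struct standing in for ArtMethod and the callback list (not ART code):

    #include <cassert>
    #include <vector>

    struct Method {};  // stand-in for ArtMethod

    // Each callback must keep the invariant: !IsMethodSafeToJit(m) implies
    // IsMethodBeingInspected(m).
    struct MethodInspectionCallback {
      virtual ~MethodInspectionCallback() = default;
      virtual bool IsMethodBeingInspected(Method* m) = 0;
      virtual bool IsMethodSafeToJit(Method* m) = 0;
    };

    // Mirrors the shape of RuntimeCallbacks::IsMethodSafeToJit(): any callback can
    // veto JITing, and a veto is only legal while that same callback also reports
    // the method as being inspected.
    bool IsMethodSafeToJit(const std::vector<MethodInspectionCallback*>& cbs, Method* m) {
      for (MethodInspectionCallback* cb : cbs) {
        if (!cb->IsMethodSafeToJit(m)) {
          assert(cb->IsMethodBeingInspected(m) && "contract: !SafeToJit -> BeingInspected");
          return false;
        }
      }
      return true;
    }

    struct Inspector : MethodInspectionCallback {
      Method* target = nullptr;
      bool IsMethodBeingInspected(Method* m) override { return m == target; }
      bool IsMethodSafeToJit(Method* m) override { return m != target; }
    };

    int main() {
      Method a, b;
      Inspector insp;
      insp.target = &a;
      std::vector<MethodInspectionCallback*> cbs{&insp};
      assert(!IsMethodSafeToJit(cbs, &a));
      assert(IsMethodSafeToJit(cbs, &b));
    }
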
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index ef172586cf..0b69851a55 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -38,7 +38,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "monitor.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index cafae22e8c..2e03562505 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -70,6 +70,7 @@ RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseT
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false)
RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
+RUNTIME_OPTIONS_KEY (bool, MadviseRandomAccess, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold)
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index b50879f0a9..a1f14be0f1 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -170,7 +170,7 @@ void SignalCatcher::Output(const std::string& s) {
#if defined(ART_TARGET_ANDROID)
if (use_tombstoned_stack_trace_fd_ && !tombstoned_notify_completion(tombstone_fd)) {
- LOG(WARNING) << "Unable to notify tombstoned of dump completion.";
+ PLOG(WARNING) << "Unable to notify tombstoned of dump completion";
}
#endif
}
diff --git a/runtime/standard_dex_file.h b/runtime/standard_dex_file.h
index 1ec06edef3..906b0b7b8e 100644
--- a/runtime/standard_dex_file.h
+++ b/runtime/standard_dex_file.h
@@ -40,13 +40,18 @@ class StandardDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ bool IsStandardDexFile() const OVERRIDE {
+ return true;
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
- const OatDexFile* oat_dex_file)
- : DexFile(base, size, location, location_checksum, oat_dex_file) {}
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
friend class DexFileLoader;
friend class DexFileVerifierTest;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2753bf71eb..47ffb4e13f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -70,8 +70,8 @@
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "native_stack_dump.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
@@ -1451,21 +1451,25 @@ class BarrierClosure : public Closure {
Barrier barrier_;
};
+// RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
bool Thread::RequestSynchronousCheckpoint(Closure* function) {
+ Thread* self = Thread::Current();
if (this == Thread::Current()) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
+    // Unlock the thread_list_lock_ before running so that the lock state is the same regardless of thread.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
// Asked to run on this thread. Just run.
function->Run(this);
return true;
}
- Thread* self = Thread::Current();
// The current thread is not this thread.
if (GetState() == ThreadState::kTerminated) {
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
return false;
}
- // Note: we're holding the thread-list lock. The thread cannot die at this point.
struct ScopedThreadListLockUnlock {
explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
: self_thread(self_in) {
@@ -1482,6 +1486,7 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
};
for (;;) {
+ Locks::thread_list_lock_->AssertExclusiveHeld(self);
// If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
// suspend-count lock for too long.
if (GetState() == ThreadState::kRunnable) {
@@ -1492,8 +1497,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
installed = RequestCheckpoint(&barrier_closure);
}
if (installed) {
- // Relinquish the thread-list lock, temporarily. We should not wait holding any locks.
- ScopedThreadListLockUnlock stllu(self);
+      // Relinquish the thread-list lock. We should not wait while holding any locks. We cannot
+      // reacquire it afterwards, since 'this' may have been deleted by then.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
barrier_closure.Wait(self);
return true;
@@ -1515,6 +1521,8 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
}
{
+ // Release for the wait. The suspension will keep us from being deleted. Reacquire after so
+ // that we can call ModifySuspendCount without racing against ThreadList::Unregister.
ScopedThreadListLockUnlock stllu(self);
{
ScopedThreadSuspension sts(self, ThreadState::kWaiting);
@@ -1543,6 +1551,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) {
Thread::resume_cond_->Broadcast(self);
}
+ // Release the thread_list_lock_ to be consistent with the barrier-closure path.
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+
return true; // We're done, break out of the loop.
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index ab89778bf9..42b38da0b9 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -265,9 +265,13 @@ class Thread {
bool RequestCheckpoint(Closure* function)
REQUIRES(Locks::thread_suspend_count_lock_);
+
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This
+  // is because Thread::Current() needs to go to sleep to allow the targeted thread to execute the
+  // checkpoint for us if it is Runnable.
bool RequestSynchronousCheckpoint(Closure* function)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(Locks::thread_list_lock_)
+ RELEASE(Locks::thread_list_lock_)
REQUIRES(!Locks::thread_suspend_count_lock_);
bool RequestEmptyCheckpoint()
REQUIRES(Locks::thread_suspend_count_lock_);
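
Note: a standalone sketch of what the REQUIRES-to-RELEASE annotation change above expresses, using Clang's generic thread-safety attributes on a toy mutex (the names here are hypothetical, not ART's):

    // Not ART code: with Clang's thread-safety attributes, RELEASE() documents a
    // function that is entered with the lock held and has released it by the time
    // it returns, on every path.
    #include <mutex>

    #if defined(__clang__)
    #define CAPABILITY(x)  __attribute__((capability(x)))
    #define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
    #else
    #define CAPABILITY(x)
    #define ACQUIRE(...)
    #define RELEASE(...)
    #endif

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { m_.lock(); }
      void Unlock() RELEASE() { m_.unlock(); }
     private:
      std::mutex m_;
    };

    Mutex g_thread_list_lock;  // hypothetical stand-in for Locks::thread_list_lock_

    // Annotated like the new RequestSynchronousCheckpoint(): the caller locks, the
    // callee unlocks on both the early-out and the normal path.
    bool RunCheckpoint(bool target_alive) RELEASE(g_thread_list_lock) {
      if (!target_alive) {
        g_thread_list_lock.Unlock();  // early return still releases the lock
        return false;
      }
      g_thread_list_lock.Unlock();    // drop the lock before any waiting
      // ... wait for the target thread to run the closure ...
      return true;
    }

    int main() {
      g_thread_list_lock.Lock();
      RunCheckpoint(/*target_alive=*/true);  // returns with the lock released
      return 0;
    }
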
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 488e4a6517..88f1fc6991 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -21,11 +21,12 @@
#include <unistd.h>
#include <sstream>
+#include <vector>
#include "android-base/stringprintf.h"
#include "backtrace/BacktraceMap.h"
-#include "nativehelper/ScopedLocalRef.h"
-#include "nativehelper/ScopedUtfChars.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
@@ -204,7 +205,11 @@ class DumpCheckpoint FINAL : public Closure {
: os_(os),
barrier_(0),
backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
- dump_native_stack_(dump_native_stack) {}
+ dump_native_stack_(dump_native_stack) {
+ if (backtrace_map_ != nullptr) {
+ backtrace_map_->SetSuffixesToIgnore(std::vector<std::string> { "oat", "odex" });
+ }
+ }
void Run(Thread* thread) OVERRIDE {
// Note thread and self may not be equal if thread was already suspended at the point of the
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 6ff966678a..20e297c991 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -113,7 +113,8 @@ void Agent::Unload() {
if (onunload_ != nullptr) {
onunload_(Runtime::Current()->GetJavaVM());
}
- dlclose(dlopen_handle_);
+  // Don't actually dlclose since some agents assume they will never get unloaded. As this only
+  // happens when the runtime is shutting down anyway, it is not a big deal.
dlopen_handle_ = nullptr;
onload_ = nullptr;
onattach_ = nullptr;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4c3fa20c9d..4b5a7610a3 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -39,7 +39,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 55bc9ecac5..955098d8c2 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -119,7 +119,9 @@ std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
if (!vdex->OpenAllDexFiles(&unique_ptr_dex_files, error_msg)) {
return nullptr;
}
- Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files), vdex->GetQuickeningInfo());
+ Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
+ vdex->GetQuickeningInfo(),
+ /* decompile_return_instruction */ false);
// Update the quickening info size to pretend there isn't any.
reinterpret_cast<Header*>(vdex->mmap_->Begin())->quickening_info_size_ = 0;
}
@@ -218,23 +220,55 @@ class QuickeningInfoIterator {
};
void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
- const ArrayRef<const uint8_t>& quickening_info) {
- if (quickening_info.size() == 0) {
- // Bail early if there is no quickening info.
+ const ArrayRef<const uint8_t>& quickening_info,
+ bool decompile_return_instruction) {
+ if (quickening_info.size() == 0 && !decompile_return_instruction) {
+ // Bail early if there is no quickening info and no need to decompile
+ // RETURN_VOID_NO_BARRIER instructions to RETURN_VOID instructions.
return;
}
- // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
- // optimization does not depend on the boot image (the optimization relies on not
- // having final fields in a class, which does not change for an app).
- constexpr bool kDecompileReturnInstruction = false;
- for (uint32_t i = 0; i < dex_files.size(); ++i) {
- for (QuickeningInfoIterator it(i, dex_files.size(), quickening_info);
- !it.Done();
- it.Advance()) {
- optimizer::ArtDecompileDEX(
- *dex_files[i]->GetCodeItem(it.GetCurrentCodeItemOffset()),
- it.GetCurrentQuickeningInfo(),
- kDecompileReturnInstruction);
+
+  // When we do not decompile RETURN_VOID_NO_BARRIER, use the faster
+  // QuickeningInfoIterator; otherwise use the slower ClassDataItemIterator.
+ if (!decompile_return_instruction) {
+ for (uint32_t i = 0; i < dex_files.size(); ++i) {
+ for (QuickeningInfoIterator it(i, dex_files.size(), quickening_info);
+ !it.Done();
+ it.Advance()) {
+ optimizer::ArtDecompileDEX(
+ *dex_files[i]->GetCodeItem(it.GetCurrentCodeItemOffset()),
+ it.GetCurrentQuickeningInfo(),
+ decompile_return_instruction);
+ }
+ }
+ } else {
+ for (uint32_t i = 0; i < dex_files.size(); ++i) {
+ QuickeningInfoIterator quick_it(i, dex_files.size(), quickening_info);
+ for (uint32_t j = 0; j < dex_files[i]->NumClassDefs(); ++j) {
+ const DexFile::ClassDef& class_def = dex_files[i]->GetClassDef(j);
+ const uint8_t* class_data = dex_files[i]->GetClassData(class_def);
+ if (class_data != nullptr) {
+ for (ClassDataItemIterator class_it(*dex_files[i], class_data);
+ class_it.HasNext();
+ class_it.Next()) {
+ if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
+ uint32_t offset = class_it.GetMethodCodeItemOffset();
+ if (!quick_it.Done() && offset == quick_it.GetCurrentCodeItemOffset()) {
+ optimizer::ArtDecompileDEX(
+ *class_it.GetMethodCodeItem(),
+ quick_it.GetCurrentQuickeningInfo(),
+ decompile_return_instruction);
+ quick_it.Advance();
+ } else {
+ optimizer::ArtDecompileDEX(*class_it.GetMethodCodeItem(),
+ /* quickened_info */ {},
+ decompile_return_instruction);
+ }
+ }
+ }
+ }
+ }
+ DCHECK(quick_it.Done()) << "Failed to use all quickening info";
}
}
}
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 63058cfe6f..11f1f527c1 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -87,6 +87,8 @@ class VdexFile {
typedef uint32_t VdexChecksum;
+ explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
+
// Returns nullptr if the vdex file cannot be opened or is not valid.
static std::unique_ptr<VdexFile> Open(const std::string& vdex_filename,
bool writable,
@@ -143,8 +145,12 @@ class VdexFile {
std::string* error_msg);
// In-place unquicken the given `dex_files` based on `quickening_info`.
+  // `decompile_return_instruction` controls whether RETURN_VOID_NO_BARRIER instructions are
+  // decompiled to RETURN_VOID instructions, using the slower ClassDataItemIterator
+  // instead of the faster QuickeningInfoIterator.
static void Unquicken(const std::vector<const DexFile*>& dex_files,
- const ArrayRef<const uint8_t>& quickening_info);
+ const ArrayRef<const uint8_t>& quickening_info,
+ bool decompile_return_instruction);
// Fully unquicken `target_dex_file` based on quickening info stored
// in this vdex file for `original_dex_file`.
@@ -155,8 +161,6 @@ class VdexFile {
const uint8_t* GetQuickenedInfoOf(const DexFile& dex_file, uint32_t code_item_offset) const;
private:
- explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
-
bool HasDexSection() const {
return GetHeader().GetDexSize() != 0;
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 74d11a32de..bfcd95c846 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -27,7 +27,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java
index 35f394cff9..a9e0faf500 100644
--- a/test/004-ThreadStress/src/Main.java
+++ b/test/004-ThreadStress/src/Main.java
@@ -462,7 +462,69 @@ public class Main implements Runnable {
permits = 3;
}
- return new Semaphore(permits, /* fair */ true);
+ Semaphore semaphore = new Semaphore(permits, /* fair */ true);
+ forceTransitiveClassInitialization(semaphore, permits);
+ return semaphore;
+ }
+
+ // Force ahead-of-time initialization of classes used by Semaphore
+ // code. Try to exercise all code paths likely to be taken during
+ // the actual test later (including having a thread blocking on
+ // the semaphore trying to acquire a permit), so that we increase
+ // the chances to initialize all classes indirectly used by
+ // QueuedWait (e.g. AbstractQueuedSynchronizer$Node).
+ private static void forceTransitiveClassInitialization(Semaphore semaphore, final int permits) {
+ // Ensure `semaphore` has the expected number of permits
+ // before we start.
+ assert semaphore.availablePermits() == permits;
+
+ // Let the main (current) thread acquire all permits from
+ // `semaphore`. Then create an auxiliary thread acquiring a
+ // permit from `semaphore`, blocking because none is
+ // available. Have the main thread release one permit, thus
+ // unblocking the second thread.
+
+ // Auxiliary thread.
+ Thread auxThread = new Thread("Aux") {
+ public void run() {
+ try {
+ // Try to acquire one permit, and block until
+ // that permit is released by the main thread.
+ semaphore.acquire();
+ // When unblocked, release the acquired permit
+ // immediately.
+ semaphore.release();
+ } catch (InterruptedException ignored) {
+ throw new RuntimeException("Test set up failed in auxiliary thread");
+ }
+ }
+ };
+
+ // Main thread.
+ try {
+ // Acquire all permits.
+ semaphore.acquire(permits);
+ // Start the auxiliary thread and have it try to acquire a
+ // permit.
+ auxThread.start();
+ // Synchronization: Wait until the auxiliary thread is
+ // blocked trying to acquire a permit from `semaphore`.
+ while (!semaphore.hasQueuedThreads()) {
+ Thread.sleep(100);
+ }
+      // Release one permit, thus unblocking `auxThread` and letting
+      // it acquire a permit.
+ semaphore.release();
+ // Synchronization: Wait for the auxiliary thread to die.
+ auxThread.join();
+ // Release remaining permits.
+ semaphore.release(permits - 1);
+
+ // Verify that all permits have been released.
+ assert semaphore.availablePermits() == permits;
+ } catch (InterruptedException ignored) {
+ throw new RuntimeException("Test set up failed in main thread");
+ }
}
public static void runTest(final int numberOfThreads, final int numberOfDaemons,
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index e2a1001e5b..6a9bf61d25 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -33,7 +33,7 @@ z (class java.lang.Character)
14 (class java.lang.Short)
[java.lang.String(int,int,char[]), public java.lang.String(), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder)]
[private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
-[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String 
java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), void java.lang.String.getChars(char[],int)]
+[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String 
java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), void java.lang.String.getChars(char[],int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
0
diff --git a/test/1929-exception-catch-exception/build b/test/1929-exception-catch-exception/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/1929-exception-catch-exception/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/1934-jvmti-signal-thread/src/art/Test1934.java b/test/1934-jvmti-signal-thread/src/art/Test1934.java
index 3e97b20bce..3ab4cf663b 100644
--- a/test/1934-jvmti-signal-thread/src/art/Test1934.java
+++ b/test/1934-jvmti-signal-thread/src/art/Test1934.java
@@ -173,11 +173,13 @@ public class Test1934 {
destroyNativeMonitor(native_monitor_id);
}
- public static void doRecur(Runnable r) {
+ public static void doRecurCnt(Runnable r, int cnt) {
if (r != null) {
r.run();
}
- doRecur(r);
+ if (cnt != 0) {
+ doRecurCnt(r, cnt - 1);
+ }
}
public static void testStopRecur() throws Exception {
@@ -186,27 +188,15 @@ public class Test1934 {
Thread target = new Thread(() -> {
sem.release();
while (true) {
- try {
- doRecur(null);
- } catch (StackOverflowError e) {}
+ doRecurCnt(null, 50);
}
}, "recuring thread!");
target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
target.start();
sem.acquire();
System.out.println("stopping other thread recurring");
- do {
- // Due to the fact that dex has a specific instruction to get the current exception it is
- // possible for the 'stop-thread' to be unintentionally caught. We just retry in this case.
- try {
- Threads.stopThread(target, new Error("AWESOME!"));
- } catch (Exception e) {
- // If we just missed the thread dying we would get a JVMTI_ERROR_THREAD_NOT_ALIVE so we
- // catch that here.
- }
- // Wait for 1 second.
- target.join(1000);
- } while (target.isAlive());
+ Threads.stopThread(target, new Error("AWESOME!"));
+ target.join();
System.out.println("Other thread Stopped by: " + out_err[0]);
if (PRINT_STACK_TRACE && out_err[0] != null) {
out_err[0].printStackTrace();
@@ -219,11 +209,9 @@ public class Test1934 {
Thread target = new Thread(() -> {
sem.release();
while (true) {
- try {
- doRecur(() -> {
- if (Thread.currentThread().isInterrupted()) { throw new Error("Interrupted!"); }
- });
- } catch (StackOverflowError e) { }
+ doRecurCnt(() -> {
+ if (Thread.currentThread().isInterrupted()) { throw new Error("Interrupted!"); }
+ }, 50);
}
}, "recuring thread!");
target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 20858f560f..7797f31867 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -218,6 +218,28 @@ public class Main {
return (arg >> 24) & 255;
}
+ /// CHECK-START: int Main.$noinline$Shr25And127(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const25:i\d+>> IntConstant 25
+ /// CHECK-DAG: <<Const127:i\d+>> IntConstant 127
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Arg>>,<<Const25>>]
+ /// CHECK-DAG: <<And:i\d+>> And [<<Shr>>,<<Const127>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$Shr25And127(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const25:i\d+>> IntConstant 25
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Arg>>,<<Const25>>]
+ /// CHECK-DAG: Return [<<UShr>>]
+
+ /// CHECK-START: int Main.$noinline$Shr25And127(int) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+ /// CHECK-NOT: And
+
+ public static int $noinline$Shr25And127(int arg) {
+ return (arg >> 25) & 127;
+ }
+
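
Note: the rewrite checked above is sound because a right shift by 25 leaves only 7 significant value bits, and the 127 mask discards exactly the sign-extension bits that distinguish an arithmetic shift from a logical one. A tiny standalone check of that identity (written in C++ for brevity; the unsigned cast models the UShr the simplifier emits):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Before the simplifier: arithmetic shift, then mask off the sign-extension bits.
    int32_t ShrAnd(int32_t arg) { return (arg >> 25) & 127; }

    // After the simplifier: a single logical (unsigned) shift.
    int32_t UShr(int32_t arg) { return static_cast<int32_t>(static_cast<uint32_t>(arg) >> 25); }

    int main() {
      // Note: in C++ the >> of a negative value is arithmetic on mainstream
      // compilers (and guaranteed since C++20), matching the dex Shr semantics.
      for (int32_t v : {0, 1, -1, 123456789, -123456789, INT32_MIN, INT32_MAX}) {
        assert(ShrAnd(v) == UShr(v));
      }
      return 0;
    }
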
/// CHECK-START: long Main.$noinline$Shr56And255(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const56:i\d+>> IntConstant 56
@@ -240,6 +262,28 @@ public class Main {
return (arg >> 56) & 255;
}
+ /// CHECK-START: long Main.$noinline$Shr57And127(long) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Const57:i\d+>> IntConstant 57
+ /// CHECK-DAG: <<Const127:j\d+>> LongConstant 127
+ /// CHECK-DAG: <<Shr:j\d+>> Shr [<<Arg>>,<<Const57>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Shr>>,<<Const127>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.$noinline$Shr57And127(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Const57:i\d+>> IntConstant 57
+ /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Arg>>,<<Const57>>]
+ /// CHECK-DAG: Return [<<UShr>>]
+
+ /// CHECK-START: long Main.$noinline$Shr57And127(long) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+ /// CHECK-NOT: And
+
+ public static long $noinline$Shr57And127(long arg) {
+ return (arg >> 57) & 127;
+ }
+
/// CHECK-START: int Main.$noinline$Shr24And127(int) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const24:i\d+>> IntConstant 24
@@ -2222,7 +2266,343 @@ public class Main {
return y + sub;
}
- public static void main(String[] args) {
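+  // The tests below check that masking a byte load with 0xff (or a short load with 0xffff)
+  // is folded into a single unsigned load, with no separate And or TypeConversion left behind.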
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteField(Main) instruction_simplifier (before)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteField(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:a\d+>> InstanceFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteField(Main) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint8FromInstanceByteField(Main m) {
+ return m.instanceByteField & 0xff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint8FromStaticByteField() instruction_simplifier (before)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> StaticFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromStaticByteField() instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:a\d+>> StaticFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromStaticByteField() instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint8FromStaticByteField() {
+ return staticByteField & 0xff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint8FromByteArray(byte[]) instruction_simplifier (before)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromByteArray(byte[]) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:a\d+>> ArrayGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromByteArray(byte[]) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint8FromByteArray(byte[] a) {
+ return a[0] & 0xff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint16FromInstanceShortField(Main) instruction_simplifier (before)
+ /// CHECK-DAG: <<Cst65535:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<Get:s\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Cst65535>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromInstanceShortField(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromInstanceShortField(Main) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint16FromInstanceShortField(Main m) {
+ return m.instanceShortField & 0xffff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint16FromStaticShortField() instruction_simplifier (before)
+ /// CHECK-DAG: <<Cst65535:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<Get:s\d+>> StaticFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Cst65535>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromStaticShortField() instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:c\d+>> StaticFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromStaticShortField() instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint16FromStaticShortField() {
+ return staticShortField & 0xffff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint16FromShortArray(short[]) instruction_simplifier (before)
+ /// CHECK-DAG: <<Cst65535:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Cst65535>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromShortArray(short[]) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getUint16FromShortArray(short[]) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getUint16FromShortArray(short[] a) {
+ return a[0] & 0xffff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getInt16FromInstanceCharField(Main) instruction_simplifier (before)
+ /// CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromInstanceCharField(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:s\d+>> InstanceFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromInstanceCharField(Main) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getInt16FromInstanceCharField(Main m) {
+ return (short) m.instanceCharField;
+ }
+
+ /// CHECK-START: int Main.$noinline$getInt16FromStaticCharField() instruction_simplifier (before)
+ /// CHECK-DAG: <<Get:c\d+>> StaticFieldGet
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromStaticCharField() instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:s\d+>> StaticFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromStaticCharField() instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getInt16FromStaticCharField() {
+ return (short) staticCharField;
+ }
+
+ /// CHECK-START: int Main.$noinline$getInt16FromCharArray(char[]) instruction_simplifier (before)
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromCharArray(char[]) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet
+ /// CHECK-DAG: Return [<<Get>>]
+
+ /// CHECK-START: int Main.$noinline$getInt16FromCharArray(char[]) instruction_simplifier (after)
+ /// CHECK-NOT: And
+ /// CHECK-NOT: TypeConversion
+ public static int $noinline$getInt16FromCharArray(char[] a) {
+ return (short) a[0];
+ }
+
+ /// CHECK-START: int Main.$noinline$byteToUint8AndBack() instruction_simplifier (before)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> StaticFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect [<<And>>{{(,[ij]\d+)?}}]
+ /// CHECK-DAG: Return [<<Invoke>>]
+
+ /// CHECK-START: int Main.$noinline$byteToUint8AndBack() instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:a\d+>> StaticFieldGet
+ /// CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect [<<Get>>{{(,[ij]\d+)?}}]
+ /// CHECK-DAG: Return [<<Invoke>>]
+
+ /// CHECK-START: int Main.$noinline$byteToUint8AndBack() instruction_simplifier$after_inlining (before)
+ /// CHECK-DAG: <<Get:a\d+>> StaticFieldGet
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$byteToUint8AndBack() instruction_simplifier$after_inlining (after)
+ /// CHECK-DAG: <<Get:b\d+>> StaticFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+ public static int $noinline$byteToUint8AndBack() {
+ return $inline$toByte(staticByteField & 0xff);
+ }
+
+ public static int $inline$toByte(int value) {
+ return (byte) value;
+ }
+
+ /// CHECK-START: int Main.$noinline$getStaticCharFieldAnd0xff() instruction_simplifier (before)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:c\d+>> StaticFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getStaticCharFieldAnd0xff() instruction_simplifier (after)
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:c\d+>> StaticFieldGet
+ /// CHECK-DAG: <<Cnv:a\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: Return [<<Cnv>>]
+
+ /// CHECK-START: int Main.$noinline$getStaticCharFieldAnd0xff() instruction_simplifier (after)
+ /// CHECK-NOT: {{a\d+}} StaticFieldGet
+ public static int $noinline$getStaticCharFieldAnd0xff() {
+ return staticCharField & 0xff;
+ }
+
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteFieldWithAnotherUse(Main) instruction_simplifier (before)
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Const255>>]
+ /// CHECK-DAG: <<Shl:i\d+>> Shl [<<Get>>,<<Const8>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<Shl>>]
+ /// CHECK-DAG: Return [<<Add>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteFieldWithAnotherUse(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Get:b\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<Cnv:a\d+>> TypeConversion [<<Get>>]
+ /// CHECK-DAG: <<Shl:i\d+>> Shl [<<Get>>,<<Const8>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Cnv>>,<<Shl>>]
+ /// CHECK-DAG: Return [<<Add>>]
+
+ /// CHECK-START: int Main.$noinline$getUint8FromInstanceByteFieldWithAnotherUse(Main) instruction_simplifier (after)
+ /// CHECK-NOT: {{a\d+}} InstanceFieldGet
+ public static int $noinline$getUint8FromInstanceByteFieldWithAnotherUse(Main m) {
+ byte b = m.instanceByteField;
+ int v1 = b & 0xff;
+ int v2 = (b << 8);
+ return v1 + v2;
+ }
+
+ /// CHECK-START: int Main.$noinline$intAnd0xffToChar(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<Const255>>]
+ /// CHECK-DAG: <<Conv:c\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$intAnd0xffToChar(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Conv:a\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+ public static int $noinline$intAnd0xffToChar(int value) {
+ return (char) (value & 0xff);
+ }
+
+ /// CHECK-START: int Main.$noinline$intAnd0x1ffToChar(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const511:i\d+>> IntConstant 511
+ /// CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<Const511>>]
+ /// CHECK-DAG: <<Conv:c\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ // TODO: Simplify this. Unlike the $noinline$intAnd0xffToChar(), the TypeConversion
+ // to `char` is not eliminated despite the result of the And being within the `char` range.
+
+ // CHECK-START: int Main.$noinline$intAnd0x1ffToChar(int) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const511:i\d+>> IntConstant 511
+ // CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<Const511>>]
+ // CHECK-DAG: Return [<<And>>]
+ public static int $noinline$intAnd0x1ffToChar(int value) {
+ return (char) (value & 0x1ff);
+ }
+
+ /// CHECK-START: int Main.$noinline$getInstanceCharFieldAnd0x1ffff(Main) instruction_simplifier (before)
+ /// CHECK-DAG: <<Cst1ffff:i\d+>> IntConstant 131071
+ /// CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
+ /// CHECK-DAG: <<And:i\d+>> And [<<Get>>,<<Cst1ffff>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: int Main.$noinline$getInstanceCharFieldAnd0x1ffff(Main) instruction_simplifier (after)
+ /// CHECK-DAG: <<Get:c\d+>> InstanceFieldGet
+ /// CHECK-DAG: Return [<<Get>>]
+ public static int $noinline$getInstanceCharFieldAnd0x1ffff(Main m) {
+ return m.instanceCharField & 0x1ffff;
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Byte(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Const>>]
+ /// CHECK-DAG: <<And2:i\d+>> And [<<And1>>,<<Const>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Byte(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Byte(byte b) {
+ return (byte)(0xff & (b & 0xff));
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Short(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Const>>]
+ /// CHECK-DAG: <<And2:i\d+>> And [<<And1>>,<<Const>>]
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Short(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Short(short s) {
+ return (short)(0xffff & (s & 0xffff));
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Boolean(boolean) instruction_simplifier$after_inlining (before)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Select:i\d+>> Select [<<Const0>>,<<Const1>>,<<Arg>>]
+ /// CHECK-DAG: <<And:i\d+>> And [<<Const255>>,<<Select>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Boolean(boolean) instruction_simplifier$after_inlining (after)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+ public static int $noinline$bug68142795Boolean(boolean b) {
+ int v = b ? 1 : 0; // Should be simplified to "b" after inlining.
+ return (byte)($inline$get255() & v);
+ }
+
+ /// CHECK-START: int Main.$noinline$bug68142795Elaborate(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Int255:i\d+>> IntConstant 255
+ /// CHECK-DAG: <<Long255:j\d+>> LongConstant 255
+ /// CHECK-DAG: <<And1:i\d+>> And [<<Arg>>,<<Int255>>]
+ /// CHECK-DAG: <<Conv1:j\d+>> TypeConversion [<<And1>>]
+ /// CHECK-DAG: <<And2:j\d+>> And [<<Conv1>>,<<Long255>>]
+ /// CHECK-DAG: <<Conv2:i\d+>> TypeConversion [<<And2>>]
+ /// CHECK-DAG: <<Conv3:b\d+>> TypeConversion [<<Conv2>>]
+ /// CHECK-DAG: Return [<<Conv3>>]
+
+ /// CHECK-START: int Main.$noinline$bug68142795Elaborate(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
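+  // All of the masks and conversions cancel out, so the simplifier should reduce this
+  // to returning the parameter directly.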
+ public static int $noinline$bug68142795Elaborate(byte b) {
+ return (byte)((int)(((long)(b & 0xff)) & 255L));
+ }
+
+ public static void main(String[] args) {
int arg = 123456;
float floatArg = 123456.125f;
@@ -2282,7 +2662,9 @@ public class Main {
assertIntEquals(0x4, $noinline$UShr28And7(0xc1234567));
assertLongEquals(0x4L, $noinline$UShr60And7(0xc123456787654321L));
assertIntEquals(0xc1, $noinline$Shr24And255(0xc1234567));
+ assertIntEquals(0x60, $noinline$Shr25And127(0xc1234567));
assertLongEquals(0xc1L, $noinline$Shr56And255(0xc123456787654321L));
+ assertLongEquals(0x60L, $noinline$Shr57And127(0xc123456787654321L));
assertIntEquals(0x41, $noinline$Shr24And127(0xc1234567));
assertLongEquals(0x41L, $noinline$Shr56And127(0xc123456787654321L));
assertIntEquals(0, $noinline$mulPow2Plus1(0));
@@ -2422,10 +2804,60 @@ public class Main {
assertFloatEquals(floatArg, $noinline$floatAddSubSimplifyArg2(floatArg, 654321.125f));
assertFloatEquals(floatArg, $noinline$floatSubAddSimplifyLeft(floatArg, 654321.125f));
assertFloatEquals(floatArg, $noinline$floatSubAddSimplifyRight(floatArg, 654321.125f));
+
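+    // Exercise the unsigned-load simplifications and the bug68142795 regression cases
+    // with values that set the sign bit of the narrow types.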
+ Main m = new Main();
+ m.instanceByteField = -1;
+ assertIntEquals(0xff, $noinline$getUint8FromInstanceByteField(m));
+ staticByteField = -2;
+ assertIntEquals(0xfe, $noinline$getUint8FromStaticByteField());
+ assertIntEquals(0xfd, $noinline$getUint8FromByteArray(new byte[] { -3 }));
+ m.instanceShortField = -4;
+ assertIntEquals(0xfffc, $noinline$getUint16FromInstanceShortField(m));
+ staticShortField = -5;
+ assertIntEquals(0xfffb, $noinline$getUint16FromStaticShortField());
+ assertIntEquals(0xfffa, $noinline$getUint16FromShortArray(new short[] { -6 }));
+ m.instanceCharField = 0xfff9;
+ assertIntEquals(-7, $noinline$getInt16FromInstanceCharField(m));
+ staticCharField = 0xfff8;
+ assertIntEquals(-8, $noinline$getInt16FromStaticCharField());
+ assertIntEquals(-9, $noinline$getInt16FromCharArray(new char[] { 0xfff7 }));
+
+ staticCharField = 0xfff6;
+ assertIntEquals(0xf6, $noinline$getStaticCharFieldAnd0xff());
+
+ staticByteField = -11;
+ assertIntEquals(-11, $noinline$byteToUint8AndBack());
+
+ m.instanceByteField = -12;
+ assertIntEquals(0xfffff4f4, $noinline$getUint8FromInstanceByteFieldWithAnotherUse(m));
+
+ assertIntEquals(0x21, $noinline$intAnd0xffToChar(0x87654321));
+ assertIntEquals(0x121, $noinline$intAnd0x1ffToChar(0x87654321));
+
+ m.instanceCharField = 'x';
+ assertIntEquals('x', $noinline$getInstanceCharFieldAnd0x1ffff(m));
+
+ assertIntEquals(0x7f, $noinline$bug68142795Byte((byte) 0x7f));
+ assertIntEquals((byte) 0x80, $noinline$bug68142795Byte((byte) 0x80));
+ assertIntEquals(0x7fff, $noinline$bug68142795Short((short) 0x7fff));
+ assertIntEquals((short) 0x8000, $noinline$bug68142795Short((short) 0x8000));
+ assertIntEquals(0, $noinline$bug68142795Boolean(false));
+ assertIntEquals(1, $noinline$bug68142795Boolean(true));
+ assertIntEquals(0x7f, $noinline$bug68142795Elaborate((byte) 0x7f));
+ assertIntEquals((byte) 0x80, $noinline$bug68142795Elaborate((byte) 0x80));
}
private static boolean $inline$true() { return true; }
private static boolean $inline$false() { return false; }
+ private static int $inline$get255() { return 255; }
public static boolean booleanField;
+
+ public static byte staticByteField;
+ public static char staticCharField;
+ public static short staticShortField;
+
+ public byte instanceByteField;
+ public char instanceCharField;
+ public short instanceShortField;
}
diff --git a/test/482-checker-loop-back-edge-use/build b/test/482-checker-loop-back-edge-use/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/482-checker-loop-back-edge-use/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 86977d1b8e..47823409a3 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -174,8 +174,8 @@ public class Main {
/// CHECK: <<Arg:z\d+>> StaticFieldGet liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>]
/// CHECK: If [<<Arg>>] liveness:<<IfLiv:\d+>>
/// CHECK: Goto liveness:<<GotoLiv1:\d+>>
- /// CHECK: Exit
- /// CHECK: Goto liveness:<<GotoLiv2:\d+>>
+ /// CHECK-DAG: Goto liveness:<<GotoLiv2:\d+>>
+ /// CHECK-DAG: Exit
/// CHECK-EVAL: <<IfLiv>> + 1 == <<ArgUse>>
/// CHECK-EVAL: <<GotoLiv1>> < <<GotoLiv2>>
/// CHECK-EVAL: <<GotoLiv1>> + 2 == <<ArgLoopUse>>
diff --git a/test/484-checker-register-hints/build b/test/484-checker-register-hints/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/484-checker-register-hints/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/484-checker-register-hints/smali/Smali.smali b/test/484-checker-register-hints/smali/Smali.smali
new file mode 100644
index 0000000000..659493611f
--- /dev/null
+++ b/test/484-checker-register-hints/smali/Smali.smali
@@ -0,0 +1,143 @@
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+.class public LSmali;
+.super Ljava/lang/Object;
+
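+# Smali counterparts of Main.test3(), with the conditional blocks emitted in two different
+# orders (test3Order1 and test3Order2 below).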
+## CHECK-START: void Smali.test3Order1(boolean, int, int, int, int, int) register (after)
+## CHECK: name "B0"
+## CHECK-NOT: ParallelMove
+## CHECK: name "B1"
+## CHECK-NOT: end_block
+## CHECK: If
+## CHECK-NOT: ParallelMove
+## CHECK: name "B6"
+## CHECK-NOT: end_block
+## CHECK: InstanceFieldSet
+# We could check here that there is a parallel move, but it's only valid
+# for some architectures (for example x86), as other architectures may
+# not do a move at all.
+## CHECK: end_block
+## CHECK-NOT: ParallelMove
+.method public static test3Order1(ZIIIII)V
+ .registers 14
+
+ sget v0, LMain;->live1:I
+ sget v1, LMain;->live2:I
+ sget v2, LMain;->live3:I
+ sget v5, LMain;->live0:I
+ if-eqz p0, :cond_13
+
+ sput v0, LMain;->live1:I
+
+ :goto_c
+ add-int v6, v0, v1
+ add-int/2addr v6, v2
+ add-int/2addr v6, v5
+ sput v6, LMain;->live1:I
+
+ return-void
+
+ :cond_13
+ sget-boolean v6, LMain;->y:Z
+
+ if-eqz v6, :cond_1a
+ sput v0, LMain;->live1:I
+ goto :goto_c
+
+ :cond_1a
+ sget v3, LMain;->live4:I
+
+ sget v4, LMain;->live5:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v0, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field2:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v1, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field3:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v2, v4
+ add-int/2addr v7, v3
+ iput v7, v6, LMain$Foo;->field4:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ iput v3, v6, LMain$Foo;->field0:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v4, v3
+ iput v7, v6, LMain$Foo;->field1:I
+ goto :goto_c
+.end method
+
+## CHECK-START: void Smali.test3Order2(boolean, int, int, int, int, int) register (after)
+## CHECK: name "B0"
+## CHECK-NOT: ParallelMove
+## CHECK: name "B1"
+## CHECK-NOT: end_block
+## CHECK: If
+## CHECK-NOT: ParallelMove
+## CHECK: name "B5"
+## CHECK-NOT: end_block
+## CHECK: InstanceFieldSet
+# We could check here that there is a parallel move, but it's only valid
+# for some architectures (for example x86), as other architectures may
+# not do a move at all.
+## CHECK: end_block
+## CHECK-NOT: ParallelMove
+.method public static test3Order2(ZIIIII)V
+ .registers 14
+
+ sget v0, LMain;->live1:I
+ sget v1, LMain;->live2:I
+ sget v2, LMain;->live3:I
+ sget v3, LMain;->live0:I
+ if-eqz p0, :cond_d
+
+ sput v0, LMain;->live1:I
+ goto :goto_37
+
+ :cond_d
+ sget-boolean v4, LMain;->y:Z
+ if-eqz v4, :cond_14
+
+ sput v0, LMain;->live1:I
+ goto :goto_37
+
+ :cond_14
+ sget v4, LMain;->live4:I
+ sget v5, LMain;->live5:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v0, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field2:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v1, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field3:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v2, v5
+ add-int/2addr v7, v4
+ iput v7, v6, LMain$Foo;->field4:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ iput v4, v6, LMain$Foo;->field0:I
+ sget-object v6, LMain;->foo:LMain$Foo;
+ add-int v7, v5, v4
+ iput v7, v6, LMain$Foo;->field1:I
+ :goto_37
+
+ add-int v4, v0, v1
+ add-int/2addr v4, v2
+ add-int/2addr v4, v3
+ sput v4, LMain;->live1:I
+ return-void
+.end method
diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java
index 6e68f7c91e..7aab6598a1 100644
--- a/test/484-checker-register-hints/src/Main.java
+++ b/test/484-checker-register-hints/src/Main.java
@@ -98,18 +98,6 @@ public class Main {
/// CHECK: name "B0"
/// CHECK-NOT: ParallelMove
/// CHECK: name "B1"
- /// CHECK-NOT: end_block
- /// CHECK: If
- /// CHECK-NOT: ParallelMove
- /// CHECK: name "B6"
- /// CHECK-NOT: end_block
- /// CHECK: InstanceFieldSet
- // We could check here that there is a parallel move, but it's only valid
- // for some architectures (for example x86), as other architectures may
- // not do move at all.
- /// CHECK: end_block
- /// CHECK-NOT: ParallelMove
-
public static void test3(boolean z, int a, int b, int c, int d, int m) {
// Same version as test2, but with branches reversed, to ensure
// whatever linear order is computed, we will get the same results.
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index 94aad9d328..049c97f59f 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -28,9 +28,17 @@ public class Main {
}
}
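+  // On ARM, the 0xff mask in and255() is now expected to become an unsigned bit-field
+  // extract (ubfx), while and254() checks that other masks still use an immediate AND.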
+ /// CHECK-START-ARM: int Main.and254(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #254
+ /// CHECK: and {{r\d+}}, {{r\d+}}, #0xfe
+
+ public static int and254(int arg) {
+ return arg & 254;
+ }
+
/// CHECK-START-ARM: int Main.and255(int) disassembly (after)
/// CHECK-NOT: movs {{r\d+}}, #255
- /// CHECK: and {{r\d+}}, {{r\d+}}, #0xff
+ /// CHECK: ubfx {{r\d+}}, {{r\d+}}, #0, #8
public static int and255(int arg) {
return arg & 255;
@@ -648,6 +656,7 @@ public class Main {
public static void main(String[] args) {
int arg = 0x87654321;
+ assertIntEquals(and254(arg), 0x20);
assertIntEquals(and255(arg), 0x21);
assertIntEquals(and511(arg), 0x121);
assertIntEquals(andF00D(arg), 0x4001);
diff --git a/test/586-checker-null-array-get/build b/test/586-checker-null-array-get/build
index 3721955670..49292c9ac1 100755
--- a/test/586-checker-null-array-get/build
+++ b/test/586-checker-null-array-get/build
@@ -20,7 +20,4 @@ export USE_JACK=false
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
-# See b/65168732
-export USE_D8=false
-
./default-build "$@"
diff --git a/test/586-checker-null-array-get/smali/SmaliTests.smali b/test/586-checker-null-array-get/smali/SmaliTests.smali
index f58af360fc..04da2d2988 100644
--- a/test/586-checker-null-array-get/smali/SmaliTests.smali
+++ b/test/586-checker-null-array-get/smali/SmaliTests.smali
@@ -80,6 +80,77 @@
return-void
.end method
+# This is identical to bar() except that it has two check-casts
+# that DX tends to generate.
+
+## CHECK-START: void SmaliTests.bar2() load_store_elimination (after)
+## CHECK-DAG: <<Null:l\d+>> NullConstant
+## CHECK-DAG: <<BoundFirst:l\d+>> BoundType [<<Null>>]
+## CHECK-DAG: <<BoundType:l\d+>> BoundType [<<BoundFirst>>]
+## CHECK-DAG: <<CheckL:l\d+>> NullCheck [<<BoundType>>]
+## CHECK-DAG: <<GetL0:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
+## CHECK-DAG: <<GetL1:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
+## CHECK-DAG: <<GetL2:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
+## CHECK-DAG: <<GetL3:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
+## CHECK-DAG: <<CheckJ:l\d+>> NullCheck [<<Null>>]
+## CHECK-DAG: <<GetJ0:j\d+>> ArrayGet [<<CheckJ>>,{{i\d+}}]
+## CHECK-DAG: <<GetJ1:j\d+>> ArrayGet [<<CheckJ>>,{{i\d+}}]
+## CHECK-DAG: <<GetJ2:j\d+>> ArrayGet [<<CheckJ>>,{{i\d+}}]
+## CHECK-DAG: <<GetJ3:j\d+>> ArrayGet [<<CheckJ>>,{{i\d+}}]
+.method public static bar2()V
+ .registers 7
+
+ .prologue
+ const/4 v6, 0x3
+ const/4 v5, 0x2
+ const/4 v4, 0x1
+ const/4 v3, 0x0
+
+ # We create multiple accesses that will lead the bounds check
+ # elimination pass to add a HDeoptimize. Not having the bounds check helped
+ # the load store elimination think it could merge two ArrayGet with different
+ # types.
+
+ # String[] array = (String[])getNull();
+ invoke-static {}, LMain;->getNull()Ljava/lang/Object;
+ move-result-object v2
+ check-cast v2, [Ljava/lang/String;
+
+ move-object v0, v2
+ check-cast v0, [Ljava/lang/String;
+
+ # objectField = array[0];
+ aget-object v2, v0, v3
+ sput-object v2, LMain;->objectField:Ljava/lang/Object;
+ # objectField = array[1];
+ aget-object v2, v0, v4
+ sput-object v2, LMain;->objectField:Ljava/lang/Object;
+ # objectField = array[2];
+ aget-object v2, v0, v5
+ sput-object v2, LMain;->objectField:Ljava/lang/Object;
+ # objectField = array[3];
+ aget-object v2, v0, v6
+ sput-object v2, LMain;->objectField:Ljava/lang/Object;
+
+ # long[] longArray = getLongArray();
+ invoke-static {}, LMain;->getLongArray()[J
+ move-result-object v1
+
+ # longField = longArray[0];
+ aget-wide v2, v1, v3
+ sput-wide v2, LMain;->longField:J
+ # longField = longArray[1];
+ aget-wide v2, v1, v4
+ sput-wide v2, LMain;->longField:J
+ # longField = longArray[2];
+ aget-wide v2, v1, v5
+ sput-wide v2, LMain;->longField:J
+ # longField = longArray[3];
+ aget-wide v2, v1, v6
+ sput-wide v2, LMain;->longField:J
+
+ return-void
+.end method
# static fields
.field static doThrow:Z # boolean
diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java
index 09ebff16c2..de9429fd8b 100644
--- a/test/586-checker-null-array-get/src/Main.java
+++ b/test/586-checker-null-array-get/src/Main.java
@@ -65,6 +65,14 @@ public class Main {
} catch (Throwable t) {
throw new Error("Unexpected Throwable", t);
}
+ try {
+ $noinline$runSmaliTest("bar2");
+ throw new Error("Expected NullPointerException");
+ } catch (NullPointerException e) {
+ // Expected.
+ } catch (Throwable t) {
+ throw new Error("Unexpected Throwable", t);
+ }
try {
test1();
@@ -86,9 +94,8 @@ public class Main {
/// CHECK-START: void Main.bar() load_store_elimination (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
- /// CHECK-DAG: <<BoundFirst:l\d+>> BoundType [<<Null>>]
- /// CHECK-DAG: <<BoundType:l\d+>> BoundType [<<BoundFirst>>]
- /// CHECK-DAG: <<CheckL:l\d+>> NullCheck [<<BoundType>>]
+ /// CHECK-DAG: BoundType [<<Null>>]
+ /// CHECK-DAG: <<CheckL:l\d+>> NullCheck
/// CHECK-DAG: <<GetL0:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
/// CHECK-DAG: <<GetL1:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
/// CHECK-DAG: <<GetL2:l\d+>> ArrayGet [<<CheckL>>,{{i\d+}}]
diff --git a/test/593-checker-boolean-2-integral-conv/build b/test/593-checker-boolean-2-integral-conv/build
index 3721955670..49292c9ac1 100755
--- a/test/593-checker-boolean-2-integral-conv/build
+++ b/test/593-checker-boolean-2-integral-conv/build
@@ -20,7 +20,4 @@ export USE_JACK=false
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
-# See b/65168732
-export USE_D8=false
-
./default-build "$@"
diff --git a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
index 00ebaaf451..494ab95434 100644
--- a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
+++ b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
@@ -30,6 +30,143 @@
return-void
.end method
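+# Smali versions of the booleanToByte/booleanToShort/booleanToChar/booleanToInt conversions
+# from Main, checked through the builder, select_generator and instruction_simplifier passes.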
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: byte SmaliTests.booleanToByte(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToByte(Z)B
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-byte v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToS>>]
+
+## CHECK-START: short SmaliTests.booleanToShort(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToShort(Z)S
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-short v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
+## CHECK-DAG: Return [<<IToC>>]
+
+## CHECK-START: char SmaliTests.booleanToChar(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToChar(Z)C
+ .registers 2
+ if-eqz p0, :cond_5
+ const/4 v0, 0x1
+
+ :goto_3
+ int-to-char v0, v0
+ return v0
+
+ :cond_5
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) builder (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
+## CHECK-DAG: If [<<Cond>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) select_generator (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: Return [<<Sel>>]
+
+## CHECK-START: int SmaliTests.booleanToInt(boolean) instruction_simplifier$after_bce (after)
+## CHECK: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: Return [<<Arg>>]
+.method static booleanToInt(Z)I
+ .registers 2
+ if-eqz p0, :cond_4
+ const/4 v0, 0x1
+
+ :goto_3
+ return v0
+
+ :cond_4
+ const/4 v0, 0x0
+ goto :goto_3
+.end method
+
## CHECK-START: long SmaliTests.booleanToLong(boolean) builder (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
diff --git a/test/593-checker-boolean-2-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java
index 3503b2e877..fdc0919f2b 100644
--- a/test/593-checker-boolean-2-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-2-integral-conv/src/Main.java
@@ -32,24 +32,6 @@ public class Main {
System.out.println("passed");
}
- /// CHECK-START: byte Main.booleanToByte(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: byte Main.booleanToByte(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:b\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: byte Main.booleanToByte(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -58,24 +40,6 @@ public class Main {
return (byte)(b ? 1 : 0);
}
- /// CHECK-START: short Main.booleanToShort(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
- /// CHECK-START: short Main.booleanToShort(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToS:s\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToS>>]
-
/// CHECK-START: short Main.booleanToShort(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -84,24 +48,6 @@ public class Main {
return (short)(b ? 1 : 0);
}
- /// CHECK-START: char Main.booleanToChar(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Phi>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
- /// CHECK-START: char Main.booleanToChar(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<IToC:c\d+>> TypeConversion [<<Sel>>]
- /// CHECK-DAG: Return [<<IToC>>]
-
/// CHECK-START: char Main.booleanToChar(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
@@ -110,22 +56,6 @@ public class Main {
return (char)(b ? 1 : 0);
}
- /// CHECK-START: int Main.booleanToInt(boolean) builder (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: If [<<Cond>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Phi>>]
-
- /// CHECK-START: int Main.booleanToInt(boolean) select_generator (after)
- /// CHECK: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: Return [<<Sel>>]
-
/// CHECK-START: int Main.booleanToInt(boolean) instruction_simplifier$after_bce (after)
/// CHECK: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
diff --git a/test/611-checker-simplify-if/build b/test/611-checker-simplify-if/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/611-checker-simplify-if/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index f6d3bbab28..3ef8fe64bb 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -281,16 +281,14 @@ public class Main {
}
/// CHECK-START: void Main.string2Bytes(char[], java.lang.String) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
/// CHECK-NOT: VecLoad
//
/// CHECK-START-ARM64: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
// NOTE: should correctly deal with compressed and uncompressed cases.
@@ -333,25 +331,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.oneBoth(short[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
//
// Bug b/37764324: integral same-length packed types can be mixed freely.
private static void oneBoth(short[] a, char[] b) {
@@ -382,12 +377,10 @@ public class Main {
/// CHECK-START-ARM: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
@@ -395,12 +388,10 @@ public class Main {
/// CHECK-START-ARM64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
@@ -408,12 +399,10 @@ public class Main {
/// CHECK-START-MIPS64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>] loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Get:b\d+>> ArrayGet [{{l\d+}},<<Phi2:i\d+>>] loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<One>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Add>>] loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>] loop:<<Loop2>> outer_loop:none
diff --git a/test/640-checker-boolean-simd/src/Main.java b/test/640-checker-boolean-simd/src/Main.java
index c337ef4fed..347f916c8d 100644
--- a/test/640-checker-boolean-simd/src/Main.java
+++ b/test/640-checker-boolean-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.and(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.and(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAnd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void and(boolean x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.or(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.or(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecOr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void or(boolean x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.xor(boolean) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.xor(boolean) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecXor loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void xor(boolean x) {
@@ -107,25 +95,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
index dc7aaf7f05..5c13fc3926 100644
--- a/test/640-checker-byte-simd/src/Main.java
+++ b/test/640-checker-byte-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,25 +177,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -229,8 +200,7 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
  // TODO: would need signedness flip.
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
index 0ba596389d..b3dff1411b 100644
--- a/test/640-checker-char-simd/src/Main.java
+++ b/test/640-checker-char-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,8 +177,7 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
  // TODO: would need signedness flip.
@@ -215,25 +189,21 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
index f7492d5578..5d0899864a 100644
--- a/test/640-checker-double-simd/src/Main.java
+++ b/test/640-checker-double-simd/src/Main.java
@@ -27,19 +27,16 @@ public class Main {
//
/// CHECK-START: void Main.add(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(double x) {
@@ -48,19 +45,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(double x) {
@@ -69,19 +63,16 @@ public class Main {
}
/// CHECK-START: void Main.mul(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(double x) {
@@ -90,19 +81,16 @@ public class Main {
}
/// CHECK-START: void Main.div(double) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.div(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.div(double) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(double x) {
@@ -111,19 +99,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -132,19 +117,16 @@ public class Main {
}
/// CHECK-START: void Main.abs() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
@@ -153,8 +135,7 @@ public class Main {
}
/// CHECK-START: void Main.conv(long[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(long[]) loop_optimization (after)
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
index 4fe9675afe..c7883f37a3 100644
--- a/test/640-checker-float-simd/src/Main.java
+++ b/test/640-checker-float-simd/src/Main.java
@@ -27,19 +27,16 @@ public class Main {
//
/// CHECK-START: void Main.add(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(float x) {
@@ -48,19 +45,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(float x) {
@@ -69,19 +63,16 @@ public class Main {
}
/// CHECK-START: void Main.mul(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(float x) {
@@ -90,19 +81,16 @@ public class Main {
}
/// CHECK-START: void Main.div(float) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.div(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.div(float) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecDiv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void div(float x) {
@@ -111,19 +99,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -132,19 +117,16 @@ public class Main {
}
/// CHECK-START: void Main.abs() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.abs() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void abs() {
@@ -153,19 +135,16 @@ public class Main {
}
/// CHECK-START: void Main.conv(int[]) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.conv(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.conv(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecCnv loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void conv(int[] b) {
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
index 10dd340129..aa230bfcaf 100644
--- a/test/640-checker-int-simd/src/Main.java
+++ b/test/640-checker-int-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -122,25 +109,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -149,25 +132,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
- //
+ //
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -176,25 +155,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
- //
+ //
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -203,25 +178,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -230,25 +201,21 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
@@ -267,30 +234,25 @@ public class Main {
/// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 32 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr32() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
static void shr32() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -299,38 +261,33 @@ public class Main {
/// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 33 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr33() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr33() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant33(); // 1, since & 31
@@ -338,38 +295,33 @@ public class Main {
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant -254 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 31
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
index 05dcae6f83..c754f2a309 100644
--- a/test/640-checker-long-simd/src/Main.java
+++ b/test/640-checker-long-simd/src/Main.java
@@ -26,19 +26,16 @@ public class Main {
//
/// CHECK-START: void Main.add(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(long x) {
@@ -47,19 +44,16 @@ public class Main {
}
/// CHECK-START: void Main.sub(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(long x) {
@@ -68,13 +62,11 @@ public class Main {
}
/// CHECK-START: void Main.mul(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(long) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
@@ -87,8 +79,7 @@ public class Main {
}
/// CHECK-START: void Main.div(long) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(long) loop_optimization (after)
@@ -102,19 +93,16 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -123,19 +111,16 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -144,19 +129,16 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -165,19 +147,16 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -186,19 +165,16 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecUShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shr2() {
@@ -217,25 +193,21 @@ public class Main {
/// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 64 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr64() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr64() loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>> outer_loop:none
static void shr64() {
// TODO: remove a[i] = a[i] altogether?
for (int i = 0; i < 128; i++)
@@ -244,31 +216,27 @@ public class Main {
/// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 65 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shr65() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shr65() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shr65() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstant65(); // 1, since & 63
@@ -276,31 +244,27 @@ public class Main {
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant -254 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
/// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:j\d+>> ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shrMinus254() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shrMinus254() loop_optimization (after)
- /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>> outer_loop:none
static void shrMinus254() {
for (int i = 0; i < 128; i++)
a[i] >>>= $opt$inline$IntConstantMinus254(); // 2, since & 63
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
index 9dc084d1df..e187397853 100644
--- a/test/640-checker-short-simd/src/Main.java
+++ b/test/640-checker-short-simd/src/Main.java
@@ -26,25 +26,21 @@ public class Main {
//
/// CHECK-START: void Main.add(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.add(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecAdd loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void add(int x) {
@@ -53,25 +49,21 @@ public class Main {
}
/// CHECK-START: void Main.sub(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sub(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecSub loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sub(int x) {
@@ -80,25 +72,21 @@ public class Main {
}
/// CHECK-START: void Main.mul(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.mul(int) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecMul loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void mul(int x) {
@@ -107,8 +95,7 @@ public class Main {
}
/// CHECK-START: void Main.div(int) loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START: void Main.div(int) loop_optimization (after)
@@ -121,25 +108,21 @@ public class Main {
}
/// CHECK-START: void Main.neg() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.neg() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNeg loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void neg() {
@@ -148,25 +131,21 @@ public class Main {
}
/// CHECK-START: void Main.not() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.not() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecNot loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void not() {
@@ -175,25 +154,21 @@ public class Main {
}
/// CHECK-START: void Main.shl4() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.shl4() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShl loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void shl4() {
@@ -202,25 +177,21 @@ public class Main {
}
/// CHECK-START: void Main.sar2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.sar2() loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecShr loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
static void sar2() {
@@ -229,8 +200,7 @@ public class Main {
}
/// CHECK-START: void Main.shr2() loop_optimization (before)
- /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
  // TODO: would need signedness flip.
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 57c51a601c..823908c20e 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -29,36 +29,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitByte(byte[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -91,36 +85,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitShort(short[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -138,12 +126,12 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitCastedChar(char[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
@@ -160,36 +148,30 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitInt(int[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -207,24 +189,20 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitLong(long[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsLong loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -239,27 +217,23 @@ public class Main {
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitFloat(float[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitFloat(float[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsFloat loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
@@ -277,24 +251,20 @@ public class Main {
/// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-MIPS64: void Main.doitDouble(double[]) loop_optimization (after)
- /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1:B\d+>> outer_loop:none
/// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsDouble loop:<<Loop2>> outer_loop:none
/// CHECK-DAG: ArraySet loop:<<Loop2>> outer_loop:none
//
diff --git a/test/646-checker-hadd-alt-byte/src/Main.java b/test/646-checker-hadd-alt-byte/src/Main.java
index 87f7688d4a..41aa40cd6d 100644
--- a/test/646-checker-hadd-alt-byte/src/Main.java
+++ b/test/646-checker-hadd-alt-byte/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -66,7 +63,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -77,28 +74,35 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -118,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -144,7 +145,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -156,28 +157,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -196,28 +205,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -225,7 +231,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -234,31 +240,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<I255>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
index 292ea1b826..8f879c77d0 100644
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ b/test/646-checker-hadd-alt-char/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -66,7 +63,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -77,28 +74,36 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -121,25 +126,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -147,7 +149,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -159,28 +161,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -202,28 +212,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -231,7 +238,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -240,23 +247,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START-ARM: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
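(For orientation only, not part of this diff: the CHECK directives above describe the unsigned halving-add idiom the loop optimizer is expected to recognize — ArrayGet, mask to unsigned, Add, shift right by one, TypeConversion, ArraySet. A minimal standalone sketch of that idiom follows; the class and method names are illustrative and do not come from the change itself.)

// Illustrative sketch of the halving-add idiom exercised by these tests.
public class HalvingAddSketch {
  static void halvingAddUnsigned(byte[] b1, byte[] b2, byte[] bo) {
    int n = Math.min(bo.length, Math.min(b1.length, b2.length));
    for (int i = 0; i < n; i++) {
      // (x & 0xff) zero-extends each byte to [0, 255]; the sum fits in an int,
      // so shifting right by one halves it without overflow (rounded:false).
      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff)) >>> 1);
    }
  }

  static void roundingHalvingAddUnsigned(byte[] b1, byte[] b2, byte[] bo) {
    int n = Math.min(bo.length, Math.min(b1.length, b2.length));
    for (int i = 0; i < n; i++) {
      // Adding 1 before the shift rounds the average up (rounded:true in the checks).
      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >>> 1);
    }
  }
}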
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
index da94829e3d..b591081fba 100644
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ b/test/646-checker-hadd-alt-short/src/Main.java
@@ -40,25 +40,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -66,7 +63,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -77,28 +74,35 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -118,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -144,7 +145,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -156,28 +157,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -196,28 +205,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -225,7 +231,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -234,31 +240,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/646-checker-hadd-byte/src/Main.java b/test/646-checker-hadd-byte/src/Main.java
index f0adca301e..4d259c437b 100644
--- a/test/646-checker-hadd-byte/src/Main.java
+++ b/test/646-checker-hadd-byte/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -63,7 +60,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -74,28 +71,35 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -115,25 +119,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -141,7 +142,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -153,28 +154,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -193,28 +202,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I127:i\d+>> IntConstant 127 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -222,7 +228,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -231,31 +237,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<I255>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<I255>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
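For orientation, a minimal standalone sketch of the scalar loop the Uint8 halving-add checks above describe. The hunk elides the loop body, so the statement inside the loop is reconstructed from the CHECK directives (And 255, Add 255, Shr 1, byte conversion) and should be read as an assumption, not a verbatim copy of the test source.

    // Sketch only: body inferred from the checker directives above.
    private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
      int min_length = Math.min(bo.length, b1.length);
      for (int i = 0; i < min_length; i++) {
        // ((b1[i] & 0xff) + 255) >> 1, narrowed back to byte; loop_optimization
        // recognizes this pattern as VecHalvingAdd packed_type:Uint8 rounded:false.
        bo[i] = (byte) (((b1[i] & 0xff) + 255) >> 1);
      }
    }
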
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
index 94030cc298..6549dab9ff 100644
--- a/test/646-checker-hadd-char/src/Main.java
+++ b/test/646-checker-hadd-char/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -63,7 +60,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -74,28 +71,35 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -118,25 +122,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -144,7 +145,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -156,28 +157,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
@@ -199,28 +208,23 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -228,7 +232,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -237,31 +241,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
//
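For orientation, a minimal sketch of the rounding char variant targeted by the Uint16 rounded:true checks above. It follows the simplified form shown in the loop_optimization (before) blocks, where the instruction_simplifier has already dropped the redundant & 0xffff masks (char loads are zero extensions, per the "HAnd has no impact" note); the loop body is an assumption reconstructed from those directives, not copied from the test source.

    // Sketch only: simplified form of the rounding halving add on char data.
    private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
      int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
      for (int i = 0; i < min_length; i++) {
        // (b1[i] + b2[i] + 1) >> 1 on zero-extended char values; vectorized as
        // VecHalvingAdd packed_type:Uint16 rounded:true.
        bo[i] = (char) ((b1[i] + b2[i] + 1) >> 1);
      }
    }
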
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
index 4ed23563b8..55bb958670 100644
--- a/test/646-checker-hadd-short/src/Main.java
+++ b/test/646-checker-hadd-short/src/Main.java
@@ -37,25 +37,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -78,25 +75,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -105,7 +99,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -116,28 +110,35 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -157,25 +158,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -195,25 +193,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -237,25 +232,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -264,7 +256,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -276,28 +268,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -305,7 +305,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -317,28 +317,36 @@ public class Main {
/// CHECK-DAG: <<Add2:i\d+>> Add [<<And1>>,<<Add1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get1>>,<<Add1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
@@ -358,28 +366,25 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
@@ -387,7 +392,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
@@ -396,31 +401,38 @@ public class Main {
/// CHECK-DAG: <<Add:i\d+>> Add [<<And>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<I1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
for (int i = 0; i < min_length; i++) {
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index 44472a87cb..d365689f5d 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -54,7 +51,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMinUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -63,28 +60,33 @@ public class Main {
/// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -101,25 +103,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -127,7 +126,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(byte[], byte[], byte[]) instruction_simplifier (before)
/// CHECK-DAG: <<I255:i\d+>> IntConstant 255 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -136,28 +135,33 @@ public class Main {
/// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -174,12 +178,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(byte[], byte[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(byte[] x, byte[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index 93f21f823b..72e8958ad8 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -63,25 +60,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -98,12 +92,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(char[], char[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(char[] x, char[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index 23a6d54d9e..6b12e7e63c 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -30,11 +30,10 @@ public class Main {
// TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(double[], double[], double[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(double[] x, double[] y, double[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -53,11 +52,10 @@ public class Main {
// TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(double[] x, double[] y, double[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-float-simd-minmax/src/Main.java b/test/651-checker-float-simd-minmax/src/Main.java
index 3959c821c4..278a9c9367 100644
--- a/test/651-checker-float-simd-minmax/src/Main.java
+++ b/test/651-checker-float-simd-minmax/src/Main.java
@@ -30,11 +30,10 @@ public class Main {
// TODO MIPS64: min(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMin(float[], float[], float[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(float[] x, float[] y, float[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -53,11 +52,10 @@ public class Main {
// TODO MIPS64: max(x, NaN)?
//
/// CHECK-START-ARM64: void Main.doitMax(float[], float[], float[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(float[] x, float[] y, float[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 11b67b84d3..598106e604 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -27,25 +27,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -61,25 +58,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-long-simd-minmax/src/Main.java b/test/651-checker-long-simd-minmax/src/Main.java
index 6289a1e3bb..6eff96616e 100644
--- a/test/651-checker-long-simd-minmax/src/Main.java
+++ b/test/651-checker-long-simd-minmax/src/Main.java
@@ -32,11 +32,10 @@ public class Main {
/// CHECK-NOT: VecMin
//
/// CHECK-START-MIPS64: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(long[] x, long[] y, long[] z) {
    int min = Math.min(x.length, Math.min(y.length, z.length));
    for (int i = 0; i < min; i++) {
@@ -58,11 +57,10 @@ public class Main {
/// CHECK-NOT: VecMax
//
/// CHECK-START-MIPS64: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG:                 VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>>      outer_loop:none
private static void doitMax(long[] x, long[] y, long[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index 00569e4ae1..d8c4d1e87e 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -28,25 +28,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -54,7 +51,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMinUnsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -63,28 +60,33 @@ public class Main {
/// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -101,25 +103,22 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -127,7 +126,7 @@ public class Main {
}
}
- /// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-START: void Main.doitMaxUnsigned(short[], short[], short[]) instruction_simplifier (before)
/// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535 loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
@@ -136,28 +135,33 @@ public class Main {
/// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<And1>>,<<And2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -174,12 +178,11 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin100(short[], short[]) loop_optimization (after)
- /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
- /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
- /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<I100:i\d+>> IntConstant 100 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
+ /// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(short[] x, short[] y) {
int min = Math.min(x.length, y.length);
for (int i = 0; i < min; i++) {
diff --git a/test/660-checker-simd-sad-int/src/Main.java b/test/660-checker-simd-sad-int/src/Main.java
index 338e841aad..388bfba0d2 100644
--- a/test/660-checker-simd-sad-int/src/Main.java
+++ b/test/660-checker-simd-sad-int/src/Main.java
@@ -32,26 +32,22 @@ public class Main {
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: int Main.sadInt2Int(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: int Main.sadInt2Int(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
private static int sadInt2Int(int[] x, int[] y) {
int min_length = Math.min(x.length, y.length);
int sad = 0;
@@ -105,26 +101,22 @@ public class Main {
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Ld1:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Ld2:d\d+>> VecLoad [{{l\d+}},<<I>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
private static int sadInt2IntAlt2(int[] x, int[] y) {
int min_length = Math.min(x.length, y.length);
int sad = 0;
diff --git a/test/660-checker-simd-sad-short2/src/Main.java b/test/660-checker-simd-sad-short2/src/Main.java
index 7acc490536..708f3aa145 100644
--- a/test/660-checker-simd-sad-short2/src/Main.java
+++ b/test/660-checker-simd-sad-short2/src/Main.java
@@ -56,13 +56,15 @@ public class Main {
return sad;
}
- /// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (before)
+ /// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<BC2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Cnv1>>,<<Cnv2>>] loop:<<Loop>> outer_loop:none
@@ -70,6 +72,18 @@ public class Main {
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
@@ -89,16 +103,33 @@ public class Main {
return sad;
}
+ /// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) instruction_simplifier (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<BC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub1:i\d+>> Sub [<<Cnv2>>,<<Cnv1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub2:i\d+>> Sub [<<Cnv1>>,<<Cnv2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Sub2>>,<<Sub1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Phi3>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ // Note: Get1+Cnv1 not simplified yet due to env use of Get1 in NullCheck for s2[i].
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Cnv2>>,<<Cnv1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Cnv1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
@@ -124,16 +155,33 @@ public class Main {
return sad;
}
- /// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (before)
+ /// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<BC2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Cnv1>>,<<Cnv2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Sub>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Sub>>,<<Neg>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Phi3>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ // Note: Get1+Cnv1 not simplified yet due to env use of Get1 in NullCheck for s2[i].
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Cnv1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
@@ -161,14 +209,16 @@ public class Main {
return sad;
}
- /// CHECK-START: long Main.sadCastedChar2Long(char[], char[]) loop_optimization (before)
+ /// CHECK-START: long Main.sadCastedChar2Long(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsL:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:j\d+>> Phi [<<ConsL>>,{{j\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<BC2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv3:j\d+>> TypeConversion [<<Cnv1>>] loop:<<Loop>> outer_loop:none
@@ -178,6 +228,21 @@ public class Main {
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START: long Main.sadCastedChar2Long(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsL:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:j\d+>> Phi [<<ConsL>>,{{j\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv1:j\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv2:j\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:j\d+>> Sub [<<Cnv1>>,<<Cnv2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: long Main.sadCastedChar2Long(char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
@@ -200,14 +265,16 @@ public class Main {
return sad;
}
- /// CHECK-START: long Main.sadCastedChar2LongAt1(char[], char[]) loop_optimization (before)
+ /// CHECK-START: long Main.sadCastedChar2LongAt1(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsL:j\d+>> LongConstant 1 loop:none
/// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:j\d+>> Phi [<<ConsL>>,{{j\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<BC2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv3:j\d+>> TypeConversion [<<Cnv1>>] loop:<<Loop>> outer_loop:none
@@ -217,6 +284,21 @@ public class Main {
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
//
+ /// CHECK-START: long Main.sadCastedChar2LongAt1(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsL:j\d+>> LongConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:j\d+>> Phi [<<ConsL>>,{{j\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv1:j\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv2:j\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:j\d+>> Sub [<<Cnv1>>,<<Cnv2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
/// CHECK-START-ARM64: long Main.sadCastedChar2LongAt1(char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
diff --git a/test/661-checker-simd-reduc/src/Main.java b/test/661-checker-simd-reduc/src/Main.java
index 0b425d8bce..1add0f1026 100644
--- a/test/661-checker-simd-reduc/src/Main.java
+++ b/test/661-checker-simd-reduc/src/Main.java
@@ -63,27 +63,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionInt(int[] x) {
int sum = 0;
@@ -111,54 +117,63 @@ public class Main {
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM: int Main.reductionIntChain() loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Cons1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi2>>] loop:none
- /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
- /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Extr1>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi3>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi4>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi3>>,<<Cons2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi4>>] loop:none
- /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
- /// CHECK-DAG: Return [<<Extr2>>] loop:none
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: int Main.reductionIntChain() loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Cons1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi2>>] loop:none
- /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
- /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Extr1>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi3>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi4>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi3>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi4>>] loop:none
- /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
- /// CHECK-DAG: Return [<<Extr2>>] loop:none
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
//
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
- // NOTE: pattern is robust with respect to vector loop unrolling.
+ /// CHECK-START-MIPS64: int Main.reductionIntChain() loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi1>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red1:d\d+>> VecReduce [<<Phi1>>] loop:none
+ /// CHECK-DAG: <<Extr1:i\d+>> VecExtractScalar [<<Red1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<I2:i\d+>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load2>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: Add [<<I2>>,<<Cons4>>] loop:<<Loop2>> outer_loop:none
+ /// CHECK-DAG: <<Red2:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Extr2:i\d+>> VecExtractScalar [<<Red2>>] loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ // NOTE: pattern is robust with respect to vector loop unrolling and peeling.
private static int reductionIntChain() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
int r = 1;
@@ -185,39 +200,34 @@ public class Main {
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM: int Main.reductionIntToLoop(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START-ARM64: int Main.reductionIntToLoop(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load1>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
- /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi4:i\d+>> Phi [<<Extr>>,{{i\d+}}] loop:<<Loop2>> outer_loop:none
- //
- /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
+ /// CHECK-START-MIPS64: int Main.reductionIntToLoop(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionIntToLoop(int[] x) {
int r = 0;
for (int i = 0; i < 4; i++) {
@@ -241,16 +251,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionLong(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Long0:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Long0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionLong(long[] x) {
long sum = 0;
@@ -296,29 +313,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionIntM1(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM1:i\d+>> IntConstant -1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<ConsM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionIntM1(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM1:i\d+>> IntConstant -1 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<ConsM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionIntM1(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionIntM1(int[] x) {
int sum = -1;
@@ -340,16 +361,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionLongM1(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<LongM1:j\d+>> LongConstant -1 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<LongM1>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecAdd [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionLongM1(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionLongM1(long[] x) {
long sum = -1L;
@@ -394,27 +422,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMinusInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMinusInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMinusInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMinusInt(int[] x) {
int sum = 0;
@@ -436,16 +470,23 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM64: long Main.reductionMinusLong(long[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Long0:j\d+>> LongConstant 0 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Long0>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecSub [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: long Main.reductionMinusLong(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [{{j\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecSub [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:j\d+>> VecExtractScalar [<<Red>>] loop:none
private static long reductionMinusLong(long[] x) {
long sum = 0;
@@ -491,29 +532,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMinInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant 2147483647 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMin [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMinInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant 2147483647 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMin [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMinInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMin [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMinInt(int[] x) {
int min = Integer.MAX_VALUE;
@@ -567,29 +612,33 @@ public class Main {
/// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START-ARM: int Main.reductionMaxInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant -2147483648 loop:none
/// CHECK-DAG: <<Cons2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMax [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
//
/// CHECK-START-ARM64: int Main.reductionMaxInt(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<ConsM:i\d+>> IntConstant -2147483648 loop:none
/// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
- /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [<<ConsM>>] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecMax [<<Phi2>>,<<Load>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Add [<<Phi1>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
+ /// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
+ //
+ /// CHECK-START-MIPS64: int Main.reductionMaxInt(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons4:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecReplicateScalar [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<I:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecMax [<<Phi>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<I>>,<<Cons4>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Red:d\d+>> VecReduce [<<Phi>>] loop:none
/// CHECK-DAG: <<Extr:i\d+>> VecExtractScalar [<<Red>>] loop:none
private static int reductionMaxInt(int[] x) {
int max = Integer.MIN_VALUE;
diff --git a/test/665-checker-simd-zero/src/Main.java b/test/665-checker-simd-zero/src/Main.java
index 66eea642a4..6cd6d6465a 100644
--- a/test/665-checker-simd-zero/src/Main.java
+++ b/test/665-checker-simd-zero/src/Main.java
@@ -29,6 +29,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroz(boolean[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroz(boolean[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = false;
@@ -45,6 +51,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerob(byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerob(byte[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -61,6 +73,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroc(char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroc(char[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -77,6 +95,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeros(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeros(short[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -93,6 +117,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zeroi(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zeroi(int[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -109,6 +139,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerol(long[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:j\d+>> LongConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerol(long[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -125,6 +161,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerof(float[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:f\d+>> FloatConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerof(float[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
@@ -141,6 +183,12 @@ public class Main {
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-MIPS64: void Main.zerod(double[]) loop_optimization (after)
+ /// CHECK-DAG: <<Zero:d\d+>> DoubleConstant 0 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Repl>>] loop:<<Loop>> outer_loop:none
private static void zerod(double[] x) {
for (int i = 0; i < x.length; i++) {
x[i] = 0;
diff --git a/test/667-out-of-bounds/expected.txt b/test/667-out-of-bounds/expected.txt
new file mode 100644
index 0000000000..e114c50371
--- /dev/null
+++ b/test/667-out-of-bounds/expected.txt
@@ -0,0 +1 @@
+java.lang.ArrayIndexOutOfBoundsException: length=5; index=82
diff --git a/test/667-out-of-bounds/info.txt b/test/667-out-of-bounds/info.txt
new file mode 100644
index 0000000000..19be6950e2
--- /dev/null
+++ b/test/667-out-of-bounds/info.txt
@@ -0,0 +1,3 @@
+Regression test for the x86/x64 backends which under certain
+circumstances used to pass the wrong value for the length of
+an array when throwing an AIOOBE.
diff --git a/test/667-out-of-bounds/src/Main.java b/test/667-out-of-bounds/src/Main.java
new file mode 100644
index 0000000000..7842569a6c
--- /dev/null
+++ b/test/667-out-of-bounds/src/Main.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static int $noinline$arrayAccess(int[] array) {
+ return array[82];
+ }
+
+ public static void main(String[] args) {
+ int[] array = new int[5];
+ try {
+ $noinline$arrayAccess(array);
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ }
+}
diff --git a/test/910-methods/check b/test/910-methods/check
index e6f7d7773f..76b23cb906 100644
--- a/test/910-methods/check
+++ b/test/910-methods/check
@@ -19,8 +19,14 @@ if [[ "$USE_JACK" == true ]]; then
patch -p0 expected.txt < expected_jack.diff
fi
-if [[ "$USE_D8" == true ]]; then
- patch -p0 expected.txt < expected_d8.diff
+./default-check "$@"
+if [[ "$?" == "0" ]]; then
+ exit 0;
fi
+# We cannot always correctly determine if D8 was used because of (b/68406220).
+# So we are just going to try to see if it matches the expected output of D8
+# no matter what.
+patch -p0 expected.txt < expected_d8.diff
+
./default-check "$@"
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
index e52955992b..accc78208a 100644
--- a/test/924-threads/expected.txt
+++ b/test/924-threads/expected.txt
@@ -33,6 +33,7 @@ Thread type is class java.lang.Thread
401 = ALIVE|BLOCKED_ON_MONITOR_ENTER
e1 = ALIVE|WAITING_WITH_TIMEOUT|SLEEPING|WAITING
5 = ALIVE|RUNNABLE
+400005 = ALIVE|RUNNABLE|IN_NATIVE
2 = TERMINATED
Thread type is class art.Test924$ExtThread
0 = NEW
@@ -41,6 +42,7 @@ Thread type is class art.Test924$ExtThread
401 = ALIVE|BLOCKED_ON_MONITOR_ENTER
e1 = ALIVE|WAITING_WITH_TIMEOUT|SLEEPING|WAITING
5 = ALIVE|RUNNABLE
+400005 = ALIVE|RUNNABLE|IN_NATIVE
2 = TERMINATED
[Thread[FinalizerDaemon,5,system], Thread[FinalizerWatchdogDaemon,5,system], Thread[HeapTaskDaemon,5,system], Thread[ReferenceQueueDaemon,5,system], Thread[TestThread,5,main], Thread[main,5,main]]
JVMTI_ERROR_THREAD_NOT_ALIVE
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
index 1ff2c3f644..e8e97817b7 100644
--- a/test/924-threads/src/art/Test924.java
+++ b/test/924-threads/src/art/Test924.java
@@ -109,6 +109,7 @@ public class Test924 {
final CountDownLatch cdl4 = new CountDownLatch(1);
final CountDownLatch cdl5 = new CountDownLatch(1);
final Holder h = new Holder();
+ final NativeWaiter w = new NativeWaiter();
Runnable r = new Runnable() {
@Override
public void run() {
@@ -136,6 +137,8 @@ public class Test924 {
while (!h.flag) {
// Busy-loop.
}
+
+ nativeLoop(w.struct);
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -193,6 +196,11 @@ public class Test924 {
printThreadState(t);
h.flag = true;
+ // Native
+ w.waitForNative();
+ printThreadState(t);
+ w.finish();
+
// Dying.
t.join();
Thread.yield();
@@ -427,6 +435,31 @@ public class Test924 {
System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
}
+ public static final class NativeWaiter {
+ public long struct;
+ public NativeWaiter() {
+ struct = nativeWaiterStructAlloc();
+ }
+ public void waitForNative() {
+ if (struct == 0L) {
+ throw new Error("Already resumed from native!");
+ }
+ nativeWaiterStructWaitForNative(struct);
+ }
+ public void finish() {
+ if (struct == 0L) {
+ throw new Error("Already resumed from native!");
+ }
+ nativeWaiterStructFinish(struct);
+ struct = 0;
+ }
+ }
+
+ private static native long nativeWaiterStructAlloc();
+ private static native void nativeWaiterStructWaitForNative(long struct);
+ private static native void nativeWaiterStructFinish(long struct);
+ private static native void nativeLoop(long w);
+
private static native Thread getCurrentThread();
private static native Object[] getThreadInfo(Thread t);
private static native int getThreadState(Thread t);
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
index e21dcc240e..8caff768c1 100644
--- a/test/924-threads/threads.cc
+++ b/test/924-threads/threads.cc
@@ -35,6 +35,46 @@
namespace art {
namespace Test924Threads {
+struct WaiterStruct {
+ std::atomic<bool> started;
+ std::atomic<bool> finish;
+};
+
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test924_nativeWaiterStructAlloc(
+ JNIEnv* env, jclass TestClass ATTRIBUTE_UNUSED) {
+ WaiterStruct* s = nullptr;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->Allocate(sizeof(WaiterStruct),
+ reinterpret_cast<unsigned char**>(&s)))) {
+ return 0;
+ }
+ s->started = false;
+ s->finish = false;
+ return static_cast<jlong>(reinterpret_cast<intptr_t>(s));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test924_nativeWaiterStructWaitForNative(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass TestClass ATTRIBUTE_UNUSED, jlong waiter_struct) {
+ WaiterStruct* s = reinterpret_cast<WaiterStruct*>(static_cast<intptr_t>(waiter_struct));
+ while (!s->started) { }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test924_nativeWaiterStructFinish(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass TestClass ATTRIBUTE_UNUSED, jlong waiter_struct) {
+ WaiterStruct* s = reinterpret_cast<WaiterStruct*>(static_cast<intptr_t>(waiter_struct));
+ s->finish = true;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test924_nativeLoop(JNIEnv* env,
+ jclass TestClass ATTRIBUTE_UNUSED,
+ jlong waiter_struct) {
+ WaiterStruct* s = reinterpret_cast<WaiterStruct*>(static_cast<intptr_t>(waiter_struct));
+ s->started = true;
+ while (!s->finish) { }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(s)));
+}
+
// private static native Thread getCurrentThread();
// private static native Object[] getThreadInfo(Thread t);
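
The WaiterStruct added above is essentially a two-flag handshake: the test thread enters native code, raises `started`, and spins until the controlling thread raises `finish`. A minimal pure-Java sketch of the same protocol, using AtomicBoolean in place of the JVMTI-allocated struct and omitting the JNI plumbing (names here are illustrative, not part of the patch):

    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch of the WaiterStruct handshake without JNI: two flags, two spin loops.
    class WaiterSketch {
      static final AtomicBoolean started = new AtomicBoolean(false);
      static final AtomicBoolean finish = new AtomicBoolean(false);

      public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
          started.set(true);         // nativeLoop(): announce the worker has reached its loop
          while (!finish.get()) { }  // spin until released
        }, "TestThread");
        worker.start();
        while (!started.get()) { }   // waitForNative(): wait for the worker to start spinning
        // ...the real test calls printThreadState(t) at this point...
        finish.set(true);            // finish(): release the worker
        worker.join();
      }
    }
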
diff --git a/test/992-source-data/expected.txt b/test/992-source-data/expected.txt
index 4db8df0ada..7f59682b1d 100644
--- a/test/992-source-data/expected.txt
+++ b/test/992-source-data/expected.txt
@@ -1,10 +1,22 @@
class art.Test992 is defined in file "Test992.java"
+class art.Test992 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class art.Test992$Target1 is defined in file "Test992.java"
+class art.Test992$Target1 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class art.Target2 is defined in file "Target2.java"
+class art.Target2 does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
int does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+int does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class java.lang.Integer is defined in file "Integer.java"
+class java.lang.Integer does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class java.lang.Object is defined in file "Object.java"
+class java.lang.Object does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
interface java.lang.Runnable is defined in file "Runnable.java"
+interface java.lang.Runnable does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class [Ljava.lang.Object; does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class [Ljava.lang.Object; does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
class [I does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+class [I does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
null does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_INVALID_CLASS
+null does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_INVALID_CLASS
+Proxy of [interface java.lang.Runnable] does not have a known source file because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
+Proxy of [interface java.lang.Runnable] does not have a known source file extension because java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION
diff --git a/test/992-source-data/source_file.cc b/test/992-source-data/source_file.cc
index 46d197d048..78687ff005 100644
--- a/test/992-source-data/source_file.cc
+++ b/test/992-source-data/source_file.cc
@@ -49,6 +49,19 @@ jstring JNICALL Java_art_Test992_getSourceFileName(JNIEnv* env,
return ret;
}
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test992_getSourceDebugExtension(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jclass target) {
+ char* ext = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetSourceDebugExtension(target, &ext))) {
+ return nullptr;
+ }
+ jstring ret = env->NewStringUTF(ext);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(ext));
+ return ret;
+}
+
} // namespace Test992SourceFile
} // namespace art
diff --git a/test/992-source-data/src/art/Test992.java b/test/992-source-data/src/art/Test992.java
index d9ab112726..cc4f0c724c 100644
--- a/test/992-source-data/src/art/Test992.java
+++ b/test/992-source-data/src/art/Test992.java
@@ -16,6 +16,8 @@
package art;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
import java.util.Base64;
public class Test992 {
@@ -33,15 +35,30 @@ public class Test992 {
doTest(new Object[0].getClass());
doTest(new int[0].getClass());
doTest(null);
+ doTest(Proxy.getProxyClass(Test992.class.getClassLoader(), Runnable.class));
}
+ public static String printClass(Class<?> k) {
+ if (k != null && Proxy.class.isAssignableFrom(k)) {
+ return String.format("Proxy of %s", Arrays.toString(k.getInterfaces()));
+ } else {
+ return String.format("%s", k);
+ }
+ }
public static void doTest(Class<?> k) {
+ String pk = printClass(k);
+ try {
+ System.out.println(pk + " is defined in file \"" + getSourceFileName(k) + "\"");
+ } catch (Exception e) {
+ System.out.println(pk + " does not have a known source file because " + e);
+ }
try {
- System.out.println(k + " is defined in file \"" + getSourceFileName(k) + "\"");
+ System.out.println(pk + " has extension \"" + getSourceDebugExtension(k) + "\"");
} catch (Exception e) {
- System.out.println(k + " does not have a known source file because " + e);
+ System.out.println(pk + " does not have a known source file extension because " + e);
}
}
public static native String getSourceFileName(Class<?> k) throws Exception;
+ public static native String getSourceDebugExtension(Class<?> k) throws Exception;
}
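
printClass() above exists because the name of a proxy class (for example "$Proxy0") is generated at runtime and is not stable across runs, while the interfaces it implements are. A small self-contained illustration of that difference (sketch only, not part of the patch):

    import java.lang.reflect.Proxy;
    import java.util.Arrays;

    class ProxyNameSketch {
      public static void main(String[] args) {
        Class<?> k = Proxy.getProxyClass(Runnable.class.getClassLoader(), Runnable.class);
        // The class name is runtime-generated and non-deterministic, so the test
        // prints the implemented interfaces instead of the class itself.
        System.out.println(k.getName());                        // e.g. "$Proxy0" (varies)
        System.out.println(Arrays.toString(k.getInterfaces())); // [interface java.lang.Runnable]
      }
    }
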
diff --git a/test/993-breakpoints/breakpoints.cc b/test/993-breakpoints/breakpoints.cc
index 3734ce8634..e9cf3b32c6 100644
--- a/test/993-breakpoints/breakpoints.cc
+++ b/test/993-breakpoints/breakpoints.cc
@@ -49,6 +49,57 @@ jobject JNICALL Java_art_Test993_constructNative(JNIEnv* env,
}
extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeObject(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticObjectMethod(clazz, method);
+ } else {
+ env->CallObjectMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeBool(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticBooleanMethod(clazz, method);
+ } else {
+ env->CallBooleanMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test993_invokeNativeLong(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject target,
+ jclass clazz,
+ jobject thizz) {
+ jmethodID method = env->FromReflectedMethod(target);
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ if (thizz == nullptr) {
+ env->CallStaticLongMethod(clazz, method);
+ } else {
+ env->CallLongMethod(thizz, method);
+ }
+}
+
+extern "C" JNIEXPORT
void JNICALL Java_art_Test993_invokeNative(JNIEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jobject target,
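
The three entry points added above differ only in which JNI Call&lt;Type&gt;Method family they use, because the JNI call must match the target method's return type. On the Java side the test picks the wrapper accordingly; the rule is effectively the following (a sketch with a hypothetical helper, not code from the patch):

    import java.lang.reflect.Method;
    import java.util.Optional;

    // Which JNI call family each of the new native wrappers corresponds to,
    // keyed off the reflected method's return type.
    class InvokerChoiceSketch {
      static String jniCallFamilyFor(Method m) {
        Class<?> ret = m.getReturnType();
        if (ret == boolean.class) return "CallBooleanMethod / CallStaticBooleanMethod"; // invokeNativeBool
        if (ret == long.class)    return "CallLongMethod / CallStaticLongMethod";       // invokeNativeLong
        return "CallObjectMethod / CallStaticObjectMethod";                             // invokeNativeObject
      }

      public static void main(String[] args) throws Exception {
        System.out.println(jniCallFamilyFor(Optional.class.getDeclaredMethod("isPresent"))); // boolean
        System.out.println(jniCallFamilyFor(Optional.class.getDeclaredMethod("empty")));     // Optional
      }
    }
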
diff --git a/test/993-breakpoints/expected.txt b/test/993-breakpoints/expected.txt
index 962154734b..1749a77e9d 100644
--- a/test/993-breakpoints/expected.txt
+++ b/test/993-breakpoints/expected.txt
@@ -552,6 +552,107 @@ Running private instance invoke
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
Invoking "new TestClass4().callPrivateMethod()"
Breakpoint: private void art.Test993$TestClass4.privateMethod() @ line=118
+Running Vector constructor
+ Breaking on []
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Created: []
+ Constructing: new Vector()
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Vector(), type: class java.util.Vector
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Vector()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running Stack constructor
+ Breaking on []
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Created: []
+ Constructing: new Stack()
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Breaking on [public java.util.Stack() @ <NON-DETERMINISTIC>, public java.util.Vector() @ <NON-DETERMINISTIC>]
+ Native constructor: public java.util.Stack(), type: class java.util.Stack
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Reflective constructor: public java.util.Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+ Constructing: new Stack()
+ Breakpoint: public java.util.Stack() @ line=<NON-DETERMINISTIC>
+ Breakpoint: public java.util.Vector() @ line=<NON-DETERMINISTIC>
+ Created: []
+Running bcp static invoke
+ Breaking on []
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Invoking "Optional::empty"
+ Breaking on [public static java.util.Optional java.util.Optional.empty() @ <NON-DETERMINISTIC>]
+ Native invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public static java.util.Optional java.util.Optional.empty() args: [this: null]
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::empty"
+ Breakpoint: public static java.util.Optional java.util.Optional.empty() @ line=<NON-DETERMINISTIC>
+Running bcp private static invoke
+ Breaking on []
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Invoking "Random::seedUniquifier"
+ Breaking on [private static long java.util.Random.seedUniquifier() @ <NON-DETERMINISTIC>]
+ Native invoking: private static long java.util.Random.seedUniquifier() args: [this: null]
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+ Invoking "Random::seedUniquifier"
+ Breakpoint: private static long java.util.Random.seedUniquifier() @ line=<NON-DETERMINISTIC>
+Running bcp private invoke
+ Breaking on []
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Invoking "Duration::toSeconds"
+ Breaking on [private java.math.BigDecimal java.time.Duration.toSeconds() @ <NON-DETERMINISTIC>]
+ Native invoking: private java.math.BigDecimal java.time.Duration.toSeconds() args: [this: PT336H]
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+ Invoking "Duration::toSeconds"
+ Breakpoint: private java.math.BigDecimal java.time.Duration.toSeconds() @ line=<NON-DETERMINISTIC>
+Running bcp invoke
+ Breaking on []
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Invoking "Optional::isPresent"
+ Breaking on [public boolean java.util.Optional.isPresent() @ <NON-DETERMINISTIC>]
+ Native invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Reflective invoking: public boolean java.util.Optional.isPresent() args: [this: Optional[test2]]
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
+ Invoking "Optional::isPresent"
+ Breakpoint: public boolean java.util.Optional.isPresent() @ line=<NON-DETERMINISTIC>
Running TestClass1 constructor
Breaking on []
Native constructor: public art.Test993$TestClass1(), type: class art.Test993$TestClass1
diff --git a/test/993-breakpoints/src/art/Test993.java b/test/993-breakpoints/src/art/Test993.java
index 781ebffc0f..d6a6a676cd 100644
--- a/test/993-breakpoints/src/art/Test993.java
+++ b/test/993-breakpoints/src/art/Test993.java
@@ -16,20 +16,20 @@
package art;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Arrays;
import java.lang.reflect.Executable;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
-import java.util.List;
-import java.util.Set;
-import java.util.Spliterator;
-import java.util.Spliterators;
-import java.util.Collection;
+
+import java.time.Duration;
+
import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.function.IntUnaryOperator;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
import java.util.function.Supplier;
public class Test993 {
@@ -120,7 +120,13 @@ public class Test993 {
}
public static void notifyBreakpointReached(Thread thr, Executable e, long loc) {
- System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + Breakpoint.locationToLine(e, loc));
+ String line;
+ if (e.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ line = Integer.valueOf(Breakpoint.locationToLine(e, loc)).toString();
+ } else {
+ line = "<NON-DETERMINISTIC>";
+ }
+ System.out.println("\t\t\tBreakpoint: " + e + " @ line=" + line);
}
public static interface ThrowRunnable extends Runnable {
@@ -180,6 +186,57 @@ public class Test993 {
public static native void invokeNative(Method m, Class<?> clazz, Object thizz);
+ public static class InvokeNativeBool implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeBool(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeBool(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeBool(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeObject implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeObject(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeObject(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeObject(Method m, Class<?> clazz, Object thizz);
+
+ public static class InvokeNativeLong implements Runnable {
+ Method m;
+ Object this_arg;
+ public InvokeNativeLong(Method m, Object this_arg) {
+ this.m = m;
+ this.this_arg = this_arg;
+ }
+
+ @Override
+ public void run() {
+ System.out.println("\t\tNative invoking: " + m + " args: [this: " + this_arg + "]");
+ invokeNativeLong(m, m.getDeclaringClass(), this_arg);
+ }
+ }
+
+ public static native void invokeNativeLong(Method m, Class<?> clazz, Object thizz);
+
public static class ConstructDirect implements Runnable {
String msg;
Supplier<Object> s;
@@ -258,7 +315,15 @@ public class Test993 {
}
private static Breakpoint.Manager.BP BP(Executable m) {
- return new Breakpoint.Manager.BP(m);
+ return new Breakpoint.Manager.BP(m) {
+ public String toString() {
+ if (method.getDeclaringClass().getPackage().equals(Test993.class.getPackage())) {
+ return super.toString();
+ } else {
+ return method.toString() + " @ <NON-DETERMINISTIC>";
+ }
+ }
+ };
}
public static void run() throws Exception {
@@ -271,6 +336,7 @@ public class Test993 {
Thread.currentThread());
runMethodTests();
+ runBCPMethodTests();
runConstructorTests();
Breakpoint.stopBreakpointWatch(Thread.currentThread());
@@ -302,6 +368,94 @@ public class Test993 {
runTestGroups("TestClass1ext constructor", tc1ext_constructors, tc1ext_bps);
}
+ // These tests make sure we are able to break on functions that might have been quickened or
REMOVE_PLACEHOLDER
+ // inlined from the boot-image. These were all chosen for being in the bootclasspath, not being
+ // long enough to prevent inlining, and not being used for the testing framework.
+ public static void runBCPMethodTests() throws Exception {
+ // The methods we will be breaking on.
+ Method bcp_private_method = Duration.class.getDeclaredMethod("toSeconds");
+ Method bcp_virtual_method = Optional.class.getDeclaredMethod("isPresent");
+ Method bcp_static_method = Optional.class.getDeclaredMethod("empty");
+ Method bcp_private_static_method = Random.class.getDeclaredMethod("seedUniquifier");
+
+ // Some constructors we will break on.
+ Constructor<?> bcp_stack_constructor = Stack.class.getConstructor();
+ Constructor<?> bcp_vector_constructor = Vector.class.getConstructor();
+ if (!(Vector.class.isAssignableFrom(Stack.class))) {
+ throw new Error("Expected Stack to extend Vector!");
+ }
+
+ // BCP constructors.
+ Runnable[] vector_constructors = new Runnable[] {
+ new ConstructNative(bcp_vector_constructor),
+ new ConstructReflect(bcp_vector_constructor),
+ new ConstructDirect("new Vector()", Vector::new),
+ };
+ Breakpoint.Manager.BP[] vector_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_vector_constructor),
+ };
+ runTestGroups("Vector constructor", vector_constructors, vector_breakpoints);
+
+ Runnable[] stack_constructors = new Runnable[] {
+ new ConstructNative(bcp_stack_constructor),
+ new ConstructReflect(bcp_stack_constructor),
+ new ConstructDirect("new Stack()", Stack::new),
+ };
+ Breakpoint.Manager.BP[] stack_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_stack_constructor), BP(bcp_vector_constructor),
+ };
+ runTestGroups("Stack constructor", stack_constructors, stack_breakpoints);
+
+ // Static function
+ Runnable[] static_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_static_method, null),
+
+ new InvokeReflect(bcp_static_method, null),
+
+ new InvokeDirect("Optional::empty", () -> { Optional.empty(); }),
+ };
+ Breakpoint.Manager.BP[] static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_static_method)
+ };
+ runTestGroups("bcp static invoke", static_invokes, static_breakpoints);
+
+ // Static private class function
+ Runnable[] private_static_invokes = new Runnable[] {
+ new InvokeNativeLong(bcp_private_static_method, null),
+
+ new InvokeDirect("Random::seedUniquifier", () -> { new Random(); }),
+ };
+ Breakpoint.Manager.BP[] private_static_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_static_method)
+ };
+ runTestGroups("bcp private static invoke", private_static_invokes, private_static_breakpoints);
+
+ // private class method
+ Duration test_duration = Duration.ofDays(14);
+ Runnable[] private_invokes = new Runnable[] {
+ new InvokeNativeObject(bcp_private_method, test_duration),
+
+ new InvokeDirect("Duration::toSeconds", () -> { test_duration.multipliedBy(2); }),
+ };
+ Breakpoint.Manager.BP[] private_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_private_method)
+ };
+ runTestGroups("bcp private invoke", private_invokes, private_breakpoints);
+
+ // class method
+ Runnable[] public_invokes = new Runnable[] {
+ new InvokeNativeBool(bcp_virtual_method, Optional.of("test")),
+
+ new InvokeReflect(bcp_virtual_method, Optional.of("test2")),
+
+ new InvokeDirect("Optional::isPresent", () -> { Optional.of("test3").isPresent(); }),
+ };
+ Breakpoint.Manager.BP[] public_breakpoints = new Breakpoint.Manager.BP[] {
+ BP(bcp_virtual_method)
+ };
+ runTestGroups("bcp invoke", public_invokes, public_breakpoints);
+ }
+
public static void runMethodTests() throws Exception {
// The methods we will be breaking on.
Method breakpoint_method = Test993.class.getDeclaredMethod("breakpoint");
diff --git a/test/Android.bp b/test/Android.bp
index b737345729..16b30f988f 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -74,7 +74,7 @@ art_cc_defaults {
],
target: {
- linux_glibc: {
+ linux: {
ldflags: [
// Allow jni_compiler_test to find Java_MyClassNatives_bar
// within itself using dlopen(NULL, ...).
@@ -83,9 +83,6 @@ art_cc_defaults {
"-Wl,-u,Java_MyClassNatives_bar",
"-Wl,-u,Java_MyClassNatives_sbar",
],
- shared_libs: [
- "libziparchive",
- ],
cflags: [
// gtest issue
"-Wno-used-but-marked-unused",
@@ -93,23 +90,15 @@ art_cc_defaults {
"-Wno-missing-noreturn",
],
},
- android: {
- ldflags: [
- // Allow jni_compiler_test to find Java_MyClassNatives_bar
- // within itself using dlopen(NULL, ...).
- "-Wl,--export-dynamic",
- "-Wl,-u,Java_MyClassNatives_bar",
- "-Wl,-u,Java_MyClassNatives_sbar",
+ host: {
+ shared_libs: [
+ "libziparchive",
],
+ },
+ android: {
shared_libs: [
"liblog",
],
- cflags: [
- // gtest issue
- "-Wno-used-but-marked-unused",
- "-Wno-deprecated",
- "-Wno-missing-noreturn",
- ],
},
},
}
@@ -135,15 +124,7 @@ art_cc_defaults {
android64: {
cflags: ["-DART_TARGET_NATIVETEST_DIR=/data/nativetest64/art"],
},
- android: {
- cflags: [
- // gtest issue
- "-Wno-used-but-marked-unused",
- "-Wno-deprecated",
- "-Wno-missing-noreturn",
- ],
- },
- linux_glibc: {
+ linux: {
cflags: [
// gtest issue
"-Wno-used-but-marked-unused",
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 2fda494086..bf964a6895 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -16,6 +16,7 @@ BOOT_IMAGE=""
COMPILE_FLAGS=""
DALVIKVM="dalvikvm32"
DEBUGGER="n"
+WITH_AGENT=""
DEBUGGER_AGENT=""
WRAP_DEBUGGER_AGENT="n"
DEV_MODE="n"
@@ -228,6 +229,11 @@ while true; do
FLAGS="${FLAGS} -Xcompiler-option --dump-cfg-append"
COMPILE_FLAGS="${COMPILE_FLAGS} --dump-cfg-append"
shift
+ elif [ "x$1" = "x--with-agent" ]; then
+ shift
+ USE_JVMTI="y"
+ WITH_AGENT="$1"
+ shift
elif [ "x$1" = "x--debug-wrap-agent" ]; then
WRAP_DEBUGGER_AGENT="y"
shift
@@ -442,6 +448,10 @@ elif [ "$DEBUGGER" = "agent" ]; then
DEBUGGER_OPTS="-agentpath:${AGENTPATH}=transport=dt_socket,address=$PORT,server=y,suspend=y"
fi
+if [ "x$WITH_AGENT" != "x" ]; then
+ FLAGS="${FLAGS} -agentpath:${WITH_AGENT}"
+fi
+
if [ "$USE_JVMTI" = "y" ]; then
if [ "$USE_JVM" = "n" ]; then
plugin=libopenjdkjvmtid.so
diff --git a/test/run-test b/test/run-test
index 09a70e50a9..fdb2ee47a7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -291,6 +291,11 @@ while true; do
elif [ "x$1" = "x--debug-wrap-agent" ]; then
run_args="${run_args} --debug-wrap-agent"
shift
+ elif [ "x$1" = "x--with-agent" ]; then
+ shift
+ option="$1"
+ run_args="${run_args} --with-agent $1"
+ shift
elif [ "x$1" = "x--debug-agent" ]; then
shift
option="$1"
@@ -661,6 +666,7 @@ if [ "$usage" = "yes" ]; then
echo " only supported on host."
echo " --debug-wrap-agent use libwrapagentproperties and tools/libjdwp-compat.props"
echo " to load the debugger agent specified by --debug-agent."
+ echo " --with-agent <agent> Run the test with the given agent loaded with -agentpath:"
echo " --debuggable Whether to compile Java code for a debugger."
echo " --gdb Run under gdb; incompatible with some tests."
echo " --gdb-arg Pass an option to gdb."
diff --git a/test/testrunner/device_config.py b/test/testrunner/device_config.py
new file mode 100644
index 0000000000..c7ed6f7fbc
--- /dev/null
+++ b/test/testrunner/device_config.py
@@ -0,0 +1,20 @@
+device_config = {
+# Configuration syntax:
+#
+# device: The value of ro.product.name or 'host'
+# properties: (Use one or more of these).
+# * run-test-args: additional run-test-args
+#
+# *** IMPORTANT ***:
+# This configuration is used by the android build server. Targets must not be renamed
+# or removed.
+#
+##########################################
+ # Fugu devices don't have enough memory to support a 128m heap with normal concurrency.
+ 'aosp_fugu' : {
+ 'run-test-args': [ "--runtime-option", "-Xmx128m" ],
+ },
+ 'fugu' : {
+ 'run-test-args': [ "--runtime-option", "-Xmx128m" ],
+ },
+}
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index b9123deee2..55569629ea 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -53,15 +53,13 @@ def _dump_many_vars(var_name):
all_vars=" ".join(_DUMP_MANY_VARS_LIST)
# The command is taken from build/envsetup.sh to fetch build variables.
- command = ("CALLED_FROM_SETUP=true " # Enable the 'dump-many-vars' make target.
- "BUILD_SYSTEM=build/core " # Set up lookup path for make includes.
- "make --no-print-directory -C \"%s\" -f build/core/config.mk "
- "dump-many-vars DUMP_MANY_VARS=\"%s\"") % (ANDROID_BUILD_TOP, all_vars)
+ command = ("build/soong/soong_ui.bash --dumpvars-mode --vars=\"%s\"") % (all_vars)
config = subprocess.Popen(command,
stdout=subprocess.PIPE,
universal_newlines=True,
- shell=True).communicate()[0] # read until EOF, select stdin
+ shell=True,
+ cwd=ANDROID_BUILD_TOP).communicate()[0] # read until EOF, select stdin
# Prints out something like:
# TARGET_ARCH='arm64'
# HOST_ARCH='x86_64'
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 492b792239..531508e6b3 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -64,6 +64,7 @@ os.environ.update(custom_env)
if target.has_key('make'):
build_command = 'make'
+ build_command += ' DX='
build_command += ' -j' + str(n_threads)
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + target.get('make')
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index ca29d0a484..a9aed6c520 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -60,6 +60,7 @@ import time
import env
from target_config import target_config
+from device_config import device_config
# timeout for individual tests.
# TODO: make it adjustable per tests and for buildbots
@@ -116,6 +117,9 @@ stop_testrunner = False
dex2oat_jobs = -1 # -1 corresponds to default threads for dex2oat
run_all_configs = False
+# Dict containing extra arguments
+extra_arguments = { "host" : [], "target" : [] }
+
# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
@@ -239,6 +243,10 @@ def setup_test_env():
n_thread = get_default_threads('host')
print_text("Concurrency: " + str(n_thread) + "\n")
+ global extra_arguments
+ for target in _user_input_variants['target']:
+ extra_arguments[target] = find_extra_device_arguments(target)
+
global semaphore
semaphore = threading.Semaphore(n_thread)
@@ -252,6 +260,33 @@ def setup_test_env():
COLOR_SKIP = ''
COLOR_NORMAL = ''
+def find_extra_device_arguments(target):
+ """
+ Gets any extra arguments from the device_config.
+ """
+ if target == 'host':
+ return device_config.get(target, { 'run-test-args' : [] })['run-test-args']
+ else:
+ device = get_device_name()
+ return device_config.get(device, { 'run-test-args' : [] })['run-test-args']
+
+def get_device_name():
+ """
+ Gets the value of ro.product.name from remote device.
+ """
+ proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
+ stderr=subprocess.STDOUT,
+ stdout = subprocess.PIPE,
+ universal_newlines=True)
+ # only wait 2 seconds.
+ output = proc.communicate(timeout = 2)[0]
+ success = not proc.wait()
+ if success:
+ return output.strip()
+ else:
+ print_text("Unable to determine device type!\n")
+ print_text("Continuing anyway.\n")
+ return "UNKNOWN_TARGET"
def run_tests(tests):
"""Creates thread workers to run the tests.
@@ -434,7 +469,7 @@ def run_tests(tests):
tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)) + options_test
run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
- command = run_test_sh + ' ' + options_test + ' ' + test
+ command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
semaphore.acquire()
worker = threading.Thread(target=run_test, args=(command, test, variant_set, test_name))
@@ -913,6 +948,7 @@ def main():
if 'target' in _user_input_variants['target']:
build_targets += 'test-art-target-run-test-dependencies'
build_command = 'make'
+ build_command += ' DX='
build_command += ' -j'
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + build_targets
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 5eccba1327..e5da385a8a 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -62,6 +62,12 @@ AHAT_TEST_DUMP_HPROF := $(intermediates.COMMON)/test-dump.hprof
AHAT_TEST_DUMP_BASE_HPROF := $(intermediates.COMMON)/test-dump-base.hprof
AHAT_TEST_DUMP_PROGUARD_MAP := $(intermediates.COMMON)/test-dump.map
+# Directories to use for ANDROID_DATA when generating the test dumps to
+# ensure we don't pollute the source tree with any artifacts from running
+# dalvikvm.
+AHAT_TEST_DUMP_ANDROID_DATA := $(intermediates.COMMON)/test-dump-android_data
+AHAT_TEST_DUMP_BASE_ANDROID_DATA := $(intermediates.COMMON)/test-dump-base-android_data
+
# Generate the proguard map in the desired location by copying it from
# wherever the build system generates it by default.
$(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(proguard_dictionary)
@@ -70,20 +76,28 @@ $(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
- $(ART_HOST_EXECUTABLES) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm64 \
$(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
$(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA)
$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
- $(PRIVATE_AHAT_TEST_ART) -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+ rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
+ $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
+$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA)
$(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
- $(PRIVATE_AHAT_TEST_ART) -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+ rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
+ ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
+ $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
@@ -117,4 +131,6 @@ AHAT_TEST_DUMP_HPROF :=
AHAT_TEST_DUMP_BASE_HPROF :=
AHAT_TEST_DUMP_PROGUARD_MAP :=
AHAT_TEST_DUMP_DEPENDENCIES :=
+AHAT_TEST_DUMP_ANDROID_DATA :=
+AHAT_TEST_DUMP_BASE_ANDROID_DATA :=
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index cb2d738f23..1a3d127fc9 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -428,8 +428,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* Returns null if the given instance has no next instance to the gc root.
*/
private static PathElement getNextPathElementToGcRoot(AhatInstance inst) {
- AhatInstance parent = inst.mNextInstanceToGcRoot;
- if (parent == null) {
+ if (inst.isRoot()) {
return null;
}
return new PathElement(inst.mNextInstanceToGcRoot, inst.mNextInstanceToGcRootField);
@@ -487,40 +486,64 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* mHardReverseReferences
* mSoftReverseReferences
*/
- static void computeReverseReferences(AhatInstance root) {
- // Do a breadth first search to visit the nodes.
- Queue<Reference> bfs = new ArrayDeque<Reference>();
+ static void computeReverseReferences(SuperRoot root) {
+ // Start by doing a breadth first search through strong references.
+ // Then continue the breadth first search through weak references.
+ Queue<Reference> strong = new ArrayDeque<Reference>();
+ Queue<Reference> weak = new ArrayDeque<Reference>();
+
for (Reference ref : root.getReferences()) {
- bfs.add(ref);
+ strong.add(ref);
}
- while (!bfs.isEmpty()) {
- Reference ref = bfs.poll();
- if (ref.ref.mHardReverseReferences == null && ref.strong) {
- // This is the first time we are seeing ref.ref through a strong
- // reference.
+ while (!strong.isEmpty()) {
+ Reference ref = strong.poll();
+ assert ref.strong;
+
+ if (ref.ref.mNextInstanceToGcRoot == null) {
+ // This is the first time we have seen ref.ref.
ref.ref.mNextInstanceToGcRoot = ref.src;
ref.ref.mNextInstanceToGcRootField = ref.field;
ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
+
for (Reference childRef : ref.ref.getReferences()) {
- bfs.add(childRef);
+ if (childRef.strong) {
+ strong.add(childRef);
+ } else {
+ weak.add(childRef);
+ }
}
}
- // Note: ref.src is null when the src is the SuperRoot.
- if (ref.src != null) {
- if (ref.strong) {
- ref.ref.mHardReverseReferences.add(ref.src);
- } else {
- if (ref.ref.mSoftReverseReferences == null) {
- ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
- if (ref.ref.mNextInstanceToGcRoot == null) {
- ref.ref.mNextInstanceToGcRoot = ref.src;
- ref.ref.mNextInstanceToGcRootField = ref.field;
- }
- }
- ref.ref.mSoftReverseReferences.add(ref.src);
+ // Note: We specifically exclude 'root' from the reverse references
+ // because it is a fake SuperRoot instance not present in the original
+ // heap dump.
+ if (ref.src != root) {
+ ref.ref.mHardReverseReferences.add(ref.src);
+ }
+ }
+
+ while (!weak.isEmpty()) {
+ Reference ref = weak.poll();
+
+ if (ref.ref.mNextInstanceToGcRoot == null) {
+ // This is the first time we have seen ref.ref.
+ ref.ref.mNextInstanceToGcRoot = ref.src;
+ ref.ref.mNextInstanceToGcRootField = ref.field;
+ ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
+
+ for (Reference childRef : ref.ref.getReferences()) {
+ weak.add(childRef);
+ }
+ }
+
+ if (ref.strong) {
+ ref.ref.mHardReverseReferences.add(ref.src);
+ } else {
+ if (ref.ref.mSoftReverseReferences == null) {
+ ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
}
+ ref.ref.mSoftReverseReferences.add(ref.src);
}
}
}
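
The rewritten computeReverseReferences() above replaces a single mixed BFS with a strong-first, weak-second traversal, so an object is only considered weakly reached if no strong path to it exists at all. The same two-phase idea, condensed over a stand-alone node/edge type rather than the real ahat classes (sketch only):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    // Two-phase BFS: drain every strong edge before any weak edge, so an object
    // only ends up marked weakly reached if no strong path to it exists.
    class TwoPhaseBfsSketch {
      static class Node {
        List<Node> strongOut = new ArrayList<>();
        List<Node> weakOut = new ArrayList<>();
        Node nextToRoot;        // parent on the chosen path back to the root
        boolean weaklyReached;  // reached through a weak edge only
      }

      static class Edge {
        final Node src, dst;
        Edge(Node src, Node dst) { this.src = src; this.dst = dst; }
      }

      static void compute(Node root) {
        Queue<Edge> strong = new ArrayDeque<>();
        Queue<Edge> weak = new ArrayDeque<>();
        enqueue(root, strong, weak);
        while (!strong.isEmpty()) {
          Edge e = strong.poll();
          if (e.dst.nextToRoot == null) {   // first (strong) visit wins
            e.dst.nextToRoot = e.src;
            enqueue(e.dst, strong, weak);
          }
        }
        while (!weak.isEmpty()) {
          Edge e = weak.poll();
          if (e.dst.nextToRoot == null) {   // only reachable once a weak edge is crossed
            e.dst.nextToRoot = e.src;
            e.dst.weaklyReached = true;
            enqueue(e.dst, weak, weak);     // everything past a weak edge stays in the weak queue
          }
        }
      }

      static void enqueue(Node src, Queue<Edge> strongQ, Queue<Edge> weakQ) {
        for (Node d : src.strongOut) strongQ.add(new Edge(src, d));
        for (Node d : src.weakOut) weakQ.add(new Edge(src, d));
      }
    }
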
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
index a2adbd2808..5210e31167 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
@@ -54,7 +54,7 @@ public class SuperRoot extends AhatInstance implements DominatorsComputation.Nod
@Override
public Reference get(int index) {
String field = ".roots[" + Integer.toString(index) + "]";
- return new Reference(null, field, mRoots.get(index), true);
+ return new Reference(SuperRoot.this, field, mRoots.get(index), true);
}
};
}
diff --git a/tools/ahat/src/test-dump/Main.java b/tools/ahat/src/test-dump/Main.java
index 333d28c214..079be7da81 100644
--- a/tools/ahat/src/test-dump/Main.java
+++ b/tools/ahat/src/test-dump/Main.java
@@ -93,6 +93,8 @@ public class Main {
null};
public Reference aLongStrongPathToSamplePathObject;
public WeakReference aShortWeakPathToSamplePathObject;
+ public WeakReference aWeakRefToGcRoot = new WeakReference(Main.class);
+ public SoftReference aWeakChain = new SoftReference(new Reference(new Reference(new Object())));
public Object[] basicStringRef;
public AddedObject addedObject;
public UnchangedObject unchangedObject = new UnchangedObject();
@@ -126,10 +128,11 @@ public class Main {
Main.class.getClassLoader(), 0x12345, 50000);
registry.registerNativeAllocation(anObject, 0xABCDABCD);
- aLongStrongPathToSamplePathObject = new Reference(new Reference(new Object()));
- aShortWeakPathToSamplePathObject = new WeakReference(
- ((Reference)aLongStrongPathToSamplePathObject.referent).referent,
- referenceQueue);
+ {
+ Object object = new Object();
+ aLongStrongPathToSamplePathObject = new Reference(new Reference(new Reference(object)));
+ aShortWeakPathToSamplePathObject = new WeakReference(new Reference(object));
+ }
addedObject = baseline ? null : new AddedObject();
removedObject = baseline ? new RemovedObject() : null;
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index a4908fd0ab..8fbb8849f0 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -274,12 +274,20 @@ public class InstanceTest {
public void gcRootPathNotWeak() throws IOException {
TestDump dump = TestDump.getTestDump();
- AhatInstance strong = dump.getDumpedAhatInstance("aLongStrongPathToSamplePathObject");
- AhatInstance strong2 = strong.getField("referent").asAhatInstance();
- AhatInstance object = strong2.getField("referent").asAhatInstance();
+ // The test dump is set up to have the following graph:
+ // -S-> strong1 -S-> strong2 -S-> strong3 -S-> object
+ // -S-> weak1 -W-> weak2 ------------------S->-/
+ // The gc root path should go through the longer chain of strong
+ // references, not the shorter chain with weak references (even though the
+ // last element in the shorter chain is a strong reference).
+
+ AhatInstance strong1 = dump.getDumpedAhatInstance("aLongStrongPathToSamplePathObject");
+ AhatInstance strong2 = strong1.getField("referent").asAhatInstance();
+ AhatInstance strong3 = strong2.getField("referent").asAhatInstance();
+ AhatInstance object = strong3.getField("referent").asAhatInstance();
List<PathElement> path = object.getPathFromGcRoot();
- assertEquals(strong2, path.get(path.size() - 2).instance);
+ assertEquals(strong3, path.get(path.size() - 2).instance);
}
@Test
@@ -368,6 +376,39 @@ public class InstanceTest {
}
@Test
+ public void weakRefToGcRoot() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ AhatInstance ref = dump.getDumpedAhatInstance("aWeakRefToGcRoot");
+
+ // The weak reference points to Main.class, which we expect will be marked
+ // as a GC root. In theory Main.class doesn't have to be a GC root, in
+ // which case this test case will need to be revised.
+ AhatInstance root = ref.getField("referent").asAhatInstance();
+ assertTrue(root.isRoot());
+
+ // We had a bug in the past where weak references to GC roots caused the
+ // roots to incorrectly be considered weakly reachable.
+ assertTrue(root.isStronglyReachable());
+ assertFalse(root.isWeaklyReachable());
+ }
+
+ @Test
+ public void weakReferenceChain() throws IOException {
+ // If the only reference to a chain of strongly referenced objects is a
+ // weak reference, then all of the objects should be considered weakly
+ // reachable.
+ TestDump dump = TestDump.getTestDump();
+ AhatInstance ref = dump.getDumpedAhatInstance("aWeakChain");
+ AhatInstance weak1 = ref.getField("referent").asAhatInstance();
+ AhatInstance weak2 = weak1.getField("referent").asAhatInstance();
+ AhatInstance weak3 = weak2.getField("referent").asAhatInstance();
+ assertTrue(ref.isStronglyReachable());
+ assertTrue(weak1.isWeaklyReachable());
+ assertTrue(weak2.isWeaklyReachable());
+ assertTrue(weak3.isWeaklyReachable());
+ }
+
+ @Test
public void reverseReferences() throws IOException {
TestDump dump = TestDump.getTestDump();
AhatInstance obj = dump.getDumpedAhatInstance("anObject");
diff --git a/tools/breakpoint-logger/Android.bp b/tools/breakpoint-logger/Android.bp
new file mode 100644
index 0000000000..67b423abf1
--- /dev/null
+++ b/tools/breakpoint-logger/Android.bp
@@ -0,0 +1,66 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+ name: "breakpointlogger-defaults",
+ host_supported: true,
+ srcs: ["breakpoint_logger.cc"],
+ defaults: ["art_defaults"],
+
+ // Note that this tool needs to be built for both 32-bit and 64-bit since it must be
+ // the same ISA as the process it is attached to.
+ compile_multilib: "both",
+
+ shared_libs: [
+ "libbase",
+ ],
+ target: {
+ android: {
+ },
+ host: {
+ },
+ },
+ header_libs: [
+ "libopenjdkjvmti_headers",
+ ],
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+ symlink_preferred_arch: true,
+}
+
+art_cc_library {
+ name: "libbreakpointlogger",
+ defaults: ["breakpointlogger-defaults"],
+ shared_libs: [
+ ],
+}
+
+art_cc_library {
+ name: "libbreakpointloggerd",
+ defaults: [
+ "art_debug_defaults",
+ "breakpointlogger-defaults",
+ ],
+ shared_libs: [],
+}
diff --git a/tools/breakpoint-logger/README.md b/tools/breakpoint-logger/README.md
new file mode 100644
index 0000000000..d7ffb3440f
--- /dev/null
+++ b/tools/breakpoint-logger/README.md
@@ -0,0 +1,54 @@
+# breakpointlogger
+
+breakpointlogger is a JVMTI agent that lets one set breakpoints that are logged
+when they are hit.
+
+# Usage
+### Build
+> `make libbreakpointlogger` # or 'make libbreakpointloggerd' with debugging checks enabled
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples
+below assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+> `:class_descriptor:->:methodName::method_sig:@:breakpoint_location:,[...]`
+
+* The breakpoint\_location is a number that's a valid jlocation for the runtime
+ being used. On ART this is a dex-pc. Dex-pcs can be found using tools such as
+ dexdump and are uint16\_t-offsets from the start of the method. On other
+ runtimes jlocations might represent other things.
+
+* Multiple breakpoints can be included in the options, separated with ','s.
+
+* Unlike most normal debuggers, the agent will load the class immediately to
+  set the breakpoint. This means that classes might be initialized earlier than
+  one might expect. This also means that one cannot set breakpoints on classes
+  that cannot be found using the standard or bootstrap classloader at startup.
+
+* Deviating from this format or including a breakpoint that cannot be found at
+ startup will cause the runtime to abort.
+
+#### ART
+> `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libbreakpointlogger.so=Lclass/Name;->methodName()V@0' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+#### RI
+> `java '-agentpath:libbreakpointlogger.so=Lclass/Name;->methodName()V@0' -cp tmp/helloworld/classes helloworld`
+
+### Output
+A normal run will look something like this:
+
+ % ./test/run-test --host --dev --with-agent 'libbreakpointlogger.so=LMain;->main([Ljava/lang/String;)V@0' 001-HelloWorld
+ <normal output removed>
+ dalvikvm32 W 10-25 10:39:09 18063 18063 breakpointlogger.cc:277] Breakpoint at location: 0x00000000 in method LMain;->main([Ljava/lang/String;)V (source: Main.java:13) thread: main
+ Hello, world!
+
+ % ./test/run-test --jvm --dev --with-agent 'libbreakpointlogger.so=LMain;->main([Ljava/lang/String;)V@0' 001-HelloWorld
+ <normal output removed>
+ java W 10-25 10:39:09 18063 18063 breakpointlogger.cc:277] Breakpoint at location: 0x00000000 in method LMain;->main([Ljava/lang/String;)V (source: Main.java:13) thread: main
+ Hello, world!
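
Each comma-separated entry in the agent options carries four pieces: a class descriptor, a method name, a method signature, and a jlocation. Splitting one hypothetical entry roughly the way the agent's parser does (a sketch for illustration, not part of the patch):

    // Decomposing "LMain;->main([Ljava/lang/String;)V@0" into its four parts.
    class BreakpointSpecSketch {
      public static void main(String[] args) {
        String spec = "LMain;->main([Ljava/lang/String;)V@0";  // hypothetical example value
        String klass = spec.substring(1, spec.indexOf(';'));   // "Main" (descriptor body, as passed to FindClass)
        String rest = spec.substring(spec.indexOf(";->") + 3);
        String name = rest.substring(0, rest.indexOf('('));    // "main"
        String sig = rest.substring(rest.indexOf('('), rest.indexOf('@'));      // "([Ljava/lang/String;)V"
        long location = Long.parseLong(rest.substring(rest.indexOf('@') + 1));  // 0 (a dex-pc on ART)
        System.out.println(klass + " " + name + " " + sig + " @" + location);
      }
    }
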
diff --git a/tools/breakpoint-logger/breakpoint_logger.cc b/tools/breakpoint-logger/breakpoint_logger.cc
new file mode 100644
index 0000000000..b48a1788e3
--- /dev/null
+++ b/tools/breakpoint-logger/breakpoint_logger.cc
@@ -0,0 +1,447 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <atomic>
+#include <iostream>
+#include <iomanip>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace breakpoint_logger {
+
+struct SingleBreakpointTarget {
+ std::string class_name;
+ std::string method_name;
+ std::string method_sig;
+ jlocation location;
+};
+
+struct BreakpointTargets {
+ std::vector<SingleBreakpointTarget> bps;
+};
+
+static void VMInitCB(jvmtiEnv* jvmti, JNIEnv* env, jthread thr ATTRIBUTE_UNUSED) {
+ BreakpointTargets* all_targets = nullptr;
+ jvmtiError err = jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&all_targets));
+ if (err != JVMTI_ERROR_NONE || all_targets == nullptr) {
+ env->FatalError("unable to get breakpoint targets");
+ }
+ for (const SingleBreakpointTarget& target : all_targets->bps) {
+ jclass k = env->FindClass(target.class_name.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe();
+ env->FatalError("Could not find class!");
+ return;
+ }
+ jmethodID m = env->GetMethodID(k, target.method_name.c_str(), target.method_sig.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionClear();
+ m = env->GetStaticMethodID(k, target.method_name.c_str(), target.method_sig.c_str());
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe();
+ env->FatalError("Could not find method!");
+ return;
+ }
+ }
+ err = jvmti->SetBreakpoint(m, target.location);
+ if (err != JVMTI_ERROR_NONE) {
+ env->FatalError("unable to set breakpoint");
+ return;
+ }
+ env->DeleteLocalRef(k);
+ }
+}
+
+class ScopedThreadInfo {
+ public:
+ ScopedThreadInfo(jvmtiEnv* jvmti_env, JNIEnv* env, jthread thread)
+ : jvmti_env_(jvmti_env), env_(env), free_name_(false) {
+ memset(&info_, 0, sizeof(info_));
+ if (thread == nullptr) {
+ info_.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmti_env->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+ info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+ } else {
+ free_name_ = true;
+ }
+ }
+
+ ~ScopedThreadInfo() {
+ if (free_name_) {
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+ }
+ env_->DeleteLocalRef(info_.thread_group);
+ env_->DeleteLocalRef(info_.context_class_loader);
+ }
+
+ const char* GetName() const {
+ return info_.name;
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ JNIEnv* env_;
+ bool free_name_;
+ jvmtiThreadInfo info_;
+};
+
+class ScopedClassInfo {
+ public:
+ ScopedClassInfo(jvmtiEnv* jvmti_env, jclass c)
+ : jvmti_env_(jvmti_env),
+ class_(c),
+ name_(nullptr),
+ generic_(nullptr),
+ file_(nullptr),
+ debug_ext_(nullptr) {}
+
+ ~ScopedClassInfo() {
+ if (class_ != nullptr) {
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+ }
+ }
+
+ bool Init() {
+ if (class_ == nullptr) {
+ name_ = const_cast<char*>("<NONE>");
+ generic_ = const_cast<char*>("<NONE>");
+ return true;
+ } else {
+ jvmtiError ret1 = jvmti_env_->GetSourceFileName(class_, &file_);
+ jvmtiError ret2 = jvmti_env_->GetSourceDebugExtension(class_, &debug_ext_);
+ return jvmti_env_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+ ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret1 != JVMTI_ERROR_INVALID_CLASS &&
+ ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret2 != JVMTI_ERROR_INVALID_CLASS;
+ }
+ }
+
+ jclass GetClass() const {
+ return class_;
+ }
+ const char* GetName() const {
+ return name_;
+ }
+ // Generic type parameters, whatever is in the <> for a class
+ const char* GetGeneric() const {
+ return generic_;
+ }
+ const char* GetSourceDebugExtension() const {
+ if (debug_ext_ == nullptr) {
+ return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+ } else {
+ return debug_ext_;
+ }
+ }
+ const char* GetSourceFileName() const {
+ if (file_ == nullptr) {
+ return "<UNKNOWN_FILE>";
+ } else {
+ return file_;
+ }
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ jclass class_;
+ char* name_;
+ char* generic_;
+ char* file_;
+ char* debug_ext_;
+};
+
+class ScopedMethodInfo {
+ public:
+ ScopedMethodInfo(jvmtiEnv* jvmti_env, JNIEnv* env, jmethodID method)
+ : jvmti_env_(jvmti_env),
+ env_(env),
+ method_(method),
+ declaring_class_(nullptr),
+ class_info_(nullptr),
+ name_(nullptr),
+ signature_(nullptr),
+ generic_(nullptr),
+ first_line_(-1) {}
+
+ ~ScopedMethodInfo() {
+ env_->DeleteLocalRef(declaring_class_);
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ if (jvmti_env_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ class_info_.reset(new ScopedClassInfo(jvmti_env_, declaring_class_));
+ jint nlines;
+ jvmtiLineNumberEntry* lines;
+ jvmtiError err = jvmti_env_->GetLineNumberTable(method_, &nlines, &lines);
+ if (err == JVMTI_ERROR_NONE) {
+ if (nlines > 0) {
+ first_line_ = lines[0].line_number;
+ }
+ jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+ } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+ err != JVMTI_ERROR_NATIVE_METHOD) {
+ return false;
+ }
+ return class_info_->Init() &&
+ (jvmti_env_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetSignature() const {
+ return signature_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ jint GetFirstLine() const {
+ return first_line_;
+ }
+
+ private:
+ jvmtiEnv* jvmti_env_;
+ JNIEnv* env_;
+ jmethodID method_;
+ jclass declaring_class_;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_;
+ char* signature_;
+ char* generic_;
+ jint first_line_;
+
+ friend std::ostream& operator<<(std::ostream& os, ScopedMethodInfo const& method);
+};
+
+std::ostream& operator<<(std::ostream& os, const ScopedMethodInfo* method) {
+ return os << *method;
+}
+
+std::ostream& operator<<(std::ostream& os, ScopedMethodInfo const& method) {
+ return os << method.GetDeclaringClassInfo().GetName() << "->" << method.GetName()
+ << method.GetSignature() << " (source: "
+ << method.GetDeclaringClassInfo().GetSourceFileName() << ":" << method.GetFirstLine()
+ << ")";
+}
+
+static void BreakpointCB(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jthread thread,
+ jmethodID method,
+ jlocation location) {
+ ScopedThreadInfo info(jvmti_env, env, thread);
+ ScopedMethodInfo method_info(jvmti_env, env, method);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
+ return;
+ }
+ LOG(WARNING) << "Breakpoint at location: 0x" << std::setw(8) << std::setfill('0') << std::hex
+ << location << " in method " << method_info << " thread: " << info.GetName();
+}
+
+static std::string SubstrOf(const std::string& s, size_t start, size_t end) {
+ if (end == std::string::npos) {
+ end = s.size();
+ }
+ if (end == start) {
+ return "";
+ }
+ CHECK_GT(end, start) << "cannot get substr of " << s;
+ return s.substr(start, end - start);
+}
+
+static bool ParseSingleBreakpoint(const std::string& bp, /*out*/SingleBreakpointTarget* target) {
+ std::string option = bp;
+ if (option.empty() || option[0] != 'L' || option.find(';') == std::string::npos) {
+ LOG(ERROR) << option << " doesn't look like it has a class name";
+ return false;
+ }
+ target->class_name = SubstrOf(option, 1, option.find(';'));
+
+ option = SubstrOf(option, option.find(';') + 1, std::string::npos);
+ if (option.size() < 2 || option[0] != '-' || option[1] != '>') {
+ LOG(ERROR) << bp << " doesn't seem to indicate a method, expected ->";
+ return false;
+ }
+ option = SubstrOf(option, 2, std::string::npos);
+ size_t sig_start = option.find('(');
+ size_t loc_start = option.find('@');
+ if (option.empty() || sig_start == std::string::npos) {
+ LOG(ERROR) << bp << " doesn't seem to have a method sig!";
+ return false;
+ } else if (loc_start == std::string::npos ||
+ loc_start < sig_start ||
+ loc_start + 1 >= option.size()) {
+ LOG(ERROR) << bp << " doesn't seem to have a valid location!";
+ return false;
+ }
+ target->method_name = SubstrOf(option, 0, sig_start);
+ target->method_sig = SubstrOf(option, sig_start, loc_start);
+ target->location = std::stol(SubstrOf(option, loc_start + 1, std::string::npos));
+ return true;
+}
+
+static std::string RemoveLastOption(const std::string& op) {
+ if (op.find(',') == std::string::npos) {
+ return "";
+ } else {
+ return SubstrOf(op, op.find(',') + 1, std::string::npos);
+ }
+}
+
+// Fills targets with the breakpoints to add.
+// Lname/of/Klass;->methodName(Lsig/of/Method)Lreturn/Type;@location,<...>
+static bool ParseArgs(const std::string& start_options,
+ /*out*/BreakpointTargets* targets) {
+ for (std::string options = start_options;
+ !options.empty();
+ options = RemoveLastOption(options)) {
+ SingleBreakpointTarget target;
+ std::string next = SubstrOf(options, 0, options.find(','));
+ if (!ParseSingleBreakpoint(next, /*out*/ &target)) {
+ LOG(ERROR) << "Unable to parse breakpoint from " << next;
+ return false;
+ }
+ targets->bps.push_back(target);
+ }
+ return true;
+}
+
+enum class StartType {
+ OnAttach, OnLoad,
+};
+
+static jint AgentStart(StartType start,
+ JavaVM* vm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ jvmtiEnv* jvmti = nullptr;
+ jvmtiError error = JVMTI_ERROR_NONE;
+ {
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_1);
+
+ if (res != JNI_OK || jvmti == nullptr) {
+ LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+ return JNI_ERR;
+ }
+ }
+
+ void* bp_target_mem = nullptr;
+ error = jvmti->Allocate(sizeof(BreakpointTargets),
+ reinterpret_cast<unsigned char**>(&bp_target_mem));
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to alloc memory for breakpoint target data";
+ return JNI_ERR;
+ }
+
+ BreakpointTargets* data = new(bp_target_mem) BreakpointTargets;
+ error = jvmti->SetEnvironmentLocalStorage(data);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set local storage";
+ return JNI_ERR;
+ }
+
+ if (!ParseArgs(options, /*out*/data)) {
+ LOG(ERROR) << "failed to parse breakpoint list!";
+ return JNI_ERR;
+ }
+
+ jvmtiCapabilities caps {}; // NOLINT [readability/braces]
+ caps.can_generate_breakpoint_events = JNI_TRUE;
+ caps.can_get_line_numbers = JNI_TRUE;
+ caps.can_get_source_file_name = JNI_TRUE;
+ caps.can_get_source_debug_extension = JNI_TRUE;
+ error = jvmti->AddCapabilities(&caps);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set caps";
+ return JNI_ERR;
+ }
+
+ jvmtiEventCallbacks callbacks {}; // NOLINT [readability/braces]
+ callbacks.Breakpoint = &BreakpointCB;
+ callbacks.VMInit = &VMInitCB;
+
+ error = jvmti->SetEventCallbacks(&callbacks, static_cast<jint>(sizeof(callbacks)));
+
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set event callbacks.";
+ return JNI_ERR;
+ }
+
+ error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ nullptr /* all threads */);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to enable breakpoint event";
+ return JNI_ERR;
+ }
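+ // When attaching to an already-running VM there is no VM_INIT event coming, so set
+ // the breakpoints immediately; on early load, wait for VM_INIT instead.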
+ if (start == StartType::OnAttach) {
+ JNIEnv* env = nullptr;
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_2);
+ if (res != JNI_OK || env == nullptr) {
+ LOG(ERROR) << "Unable to get jnienv";
+ return JNI_ERR;
+ }
+ VMInitCB(jvmti, env, nullptr);
+ } else {
+ error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_VM_INIT,
+ nullptr /* all threads */);
+ if (error != JVMTI_ERROR_NONE) {
+ LOG(ERROR) << "Unable to set event vminit";
+ return JNI_ERR;
+ }
+ }
+ return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* reserved) {
+ return AgentStart(StartType::OnAttach, vm, options, reserved);
+}
+
+// Early attachment (agent loaded at VM startup).
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+ return AgentStart(StartType::OnLoad, jvm, options, reserved);
+}
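+
+// Illustrative invocations (agent file name, path and breakpoint are hypothetical):
+//   dalvikvm ... '-agentpath:/path/to/agent.so=Lfoo/Bar;->run()V@0'
+//   am attach-agent <pid> /path/to/agent.so=Lfoo/Bar;->run()V@0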
+
+} // namespace breakpoint_logger
+
diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh
index 8d8e2bbe6f..4ca2722ac9 100755
--- a/tools/golem/build-target.sh
+++ b/tools/golem/build-target.sh
@@ -147,12 +147,8 @@ get_build_var() {
[[ -n $target_product ]] && extras+=" TARGET_PRODUCT=$target_product"
[[ -n $target_build_variant ]] && extras+=" TARGET_BUILD_VARIANT=$target_build_variant"
- # call dumpvar-$name from the makefile system.
- (\cd "$(gettop)";
- CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \
- command make --no-print-directory -f build/core/config.mk \
- $extras \
- dumpvar-$varname)
+ # call dumpvar from the build system.
+ (\cd "$(gettop)"; env $extras build/soong/soong_ui.bash --dumpvar-mode $varname)
}
# Defaults from command-line.
@@ -160,7 +156,7 @@ get_build_var() {
mode="" # blank or 'golem' if --golem was specified.
golem_target="" # --golem=$golem_target
config="" # --machine-type=$config
-j_arg="-j8"
+j_arg=""
showcommands=""
simulate=""
make_tarball=""
@@ -353,7 +349,7 @@ fi
# and maybe calls lunch).
#
-execute make "${j_arg}" "${make_target}"
+execute build/soong/soong_ui.bash --make-mode "${j_arg}" "${make_target}"
if $strip_symbols; then
# Further reduce size by stripping symbols.
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index bd422e964e..646a96adbb 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -22,13 +22,6 @@
name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
},
{
- description: "Tests fail due to using the not yet supported interrupt thread functions",
- result: EXEC_FAILED,
- bug: 34415266,
- names: [ "org.apache.harmony.jpda.tests.jdwp.ThreadReference.CurrentContendedMonitorTest#testCurrentContendedMonitor001",
- "org.apache.harmony.jpda.tests.jdwp.ThreadReference.InterruptTest#testInterrupt001" ]
-},
-{
description: "Tests fail with assertion error on slot number",
result: EXEC_FAILED,
bug: 66905468,
@@ -71,34 +64,6 @@
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
},
-/* TODO Investigate these failures more closely */
-{
- description: "Tests that fail when run on the chromium buildbots against the prebuilt libjdwp.so in certain configurations",
- result: EXEC_FAILED,
- bug: 67497270,
- names: [
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEvents003Test#testCombinedEvents003_01",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_01",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_02",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_03",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_04",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_05",
- "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_06",
- "org.apache.harmony.jpda.tests.jdwp.Events.VMDeathTest#testVMDeathEvent",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.ClassPrepareTest#testClassPrepare001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.ExceptionTest#testException001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldAccessTest#testFieldAccess001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldModificationTest#testFieldModification001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.SingleStepTest#testSingleStep001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.VMDeathTest#testVMDeathRequest",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.SignatureWithGenericTest#testSignatureWithGeneric001",
- "org.apache.harmony.jpda.tests.jdwp.StackFrame.GetValues002Test#testGetValues005_Int2",
- "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.SetDefaultStratumTest#testSetDefaultStratum001",
- "org.apache.harmony.jpda.tests.jdwp.ThreadReference.StatusTest#testStatus001",
- "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesTest#testAllClasses002",
- "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesWithGenericTest#testAllClassesWithGeneric001"
- ]
-},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index cee75df4ca..db8c54056d 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -285,7 +285,7 @@ fi
if [[ $using_jack == "true" ]]; then
toolchain_args="--toolchain jack --language JN --jack-arg -g"
else
- toolchain_args="--toolchain jdk --language CUR"
+ toolchain_args="--toolchain dx --language CUR"
fi
# Run the tests using vogar.
@@ -308,7 +308,7 @@ vogar $vm_command \
vogar_exit_status=$?
echo "Killing stalled dalvikvm processes..."
-if [[ $host == "yes" ]]; then
+if [[ $mode == "host" ]]; then
pkill -9 -f /bin/dalvikvm
else
adb shell pkill -9 -f /bin/dalvikvm
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index eecdd2fb5e..ed3cf40f56 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -149,7 +149,7 @@ vogar_args="$vogar_args --timeout 480"
if [[ $using_jack == "true" ]]; then
vogar_args="$vogar_args --toolchain jack --language JO"
else
- vogar_args="$vogar_args --toolchain jdk --language CUR"
+ vogar_args="$vogar_args --toolchain dx --language CUR"
fi
# JIT settings.
diff --git a/tools/wrapagentproperties/wrapagentproperties.cc b/tools/wrapagentproperties/wrapagentproperties.cc
index dca627046e..67d5279672 100644
--- a/tools/wrapagentproperties/wrapagentproperties.cc
+++ b/tools/wrapagentproperties/wrapagentproperties.cc
@@ -45,7 +45,6 @@ static std::mutex unload_mutex;
struct Unloader {
AgentUnloadFunction unload;
- void* dlclose_handle;
};
static std::vector<Unloader> unload_functions;
@@ -71,7 +70,6 @@ struct ProxyJavaVM {
std::lock_guard<std::mutex> lk(unload_mutex);
unload_functions.push_back({
reinterpret_cast<AgentUnloadFunction>(dlsym(dlopen_handle, kOnUnload)),
- dlopen_handle
});
}
attach = reinterpret_cast<AgentLoadFunction>(dlsym(dlopen_handle, kOnAttach));
@@ -337,7 +335,7 @@ extern "C" JNIEXPORT void JNICALL Agent_OnUnload(JavaVM* jvm) {
std::lock_guard<std::mutex> lk(unload_mutex);
for (const Unloader& u : unload_functions) {
u.unload(jvm);
- dlclose(u.dlclose_handle);
+ // Don't dlclose since some agents expect to still have code loaded after this.
}
unload_functions.clear();
}