-rw-r--r--  Android.mk | 29
-rw-r--r--  build/Android.common_test.mk | 4
-rw-r--r--  build/Android.cpplint.mk | 8
-rw-r--r--  build/Android.gtest.mk | 4
-rw-r--r--  compiler/Android.mk | 3
-rw-r--r--  compiler/common_compiler_test.cc | 4
-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc | 2
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 32
-rw-r--r--  compiler/dex/quick/quick_compiler.h | 3
-rw-r--r--  compiler/dex/quick/x86/quick_assemble_x86_test.cc | 2
-rw-r--r--  compiler/driver/compiled_method_storage_test.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 27
-rw-r--r--  compiler/driver/compiler_driver.h | 10
-rw-r--r--  compiler/driver/compiler_options.cc | 2
-rw-r--r--  compiler/driver/compiler_options.h | 2
-rw-r--r--  compiler/dwarf/method_debug_info.h | 4
-rw-r--r--  compiler/elf_builder.h | 12
-rw-r--r--  compiler/elf_writer_debug.cc | 293
-rw-r--r--  compiler/elf_writer_debug.h | 15
-rw-r--r--  compiler/elf_writer_quick.cc | 2
-rw-r--r--  compiler/image_writer.cc | 303
-rw-r--r--  compiler/image_writer.h | 98
-rw-r--r--  compiler/jit/jit_compiler.cc | 41
-rw-r--r--  compiler/jit/jit_compiler.h | 4
-rw-r--r--  compiler/linker/relative_patcher_test.h | 2
-rw-r--r--  compiler/oat_test.cc | 2
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 65
-rw-r--r--  compiler/optimizing/code_generator.cc | 12
-rw-r--r--  compiler/optimizing/code_generator.h | 120
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 807
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 110
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 30
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 391
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 216
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 22
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 30
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 26
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 96
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 6
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 12
-rw-r--r--  compiler/optimizing/intrinsics_mips.h | 5
-rw-r--r--  compiler/optimizing/nodes.cc | 28
-rw-r--r--  compiler/optimizing/nodes.h | 26
-rw-r--r--  compiler/optimizing/nodes_arm64.h | 1
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 38
-rw-r--r--  compiler/optimizing/parallel_move_resolver.cc | 5
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 2
-rw-r--r--  compiler/optimizing/register_allocator.cc | 6
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 2
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 16
-rw-r--r--  compiler/profile_assistant.cc | 69
-rw-r--r--  compiler/profile_assistant.h | 61
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 2
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 32
-rw-r--r--  compiler/utils/assembler_thumb_test.cc | 70
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc | 197
-rw-r--r--  compiler/utils/mips/assembler_mips.cc | 46
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 8
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 24
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc | 16
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 4
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc | 16
-rw-r--r--  compiler/utils/swap_space.cc | 45
-rw-r--r--  compiler/utils/swap_space.h | 82
-rw-r--r--  dex2oat/dex2oat.cc | 76
-rw-r--r--  disassembler/disassembler_mips.cc | 2
-rw-r--r--  patchoat/patchoat.cc | 2
-rw-r--r--  runtime/Android.mk | 7
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 109
-rw-r--r--  runtime/arch/stub_test.cc | 16
-rw-r--r--  runtime/art_method-inl.h | 3
-rw-r--r--  runtime/art_method.h | 4
-rw-r--r--  runtime/asm_support.h | 74
-rw-r--r--  runtime/base/allocator.h | 2
-rw-r--r--  runtime/base/mutex.cc | 12
-rw-r--r--  runtime/base/mutex.h | 1
-rw-r--r--  runtime/class_linker.cc | 475
-rw-r--r--  runtime/class_linker_test.cc | 3
-rw-r--r--  runtime/class_table.h | 3
-rw-r--r--  runtime/common_runtime_test.cc | 2
-rw-r--r--  runtime/common_throws.cc | 24
-rw-r--r--  runtime/common_throws.h | 10
-rw-r--r--  runtime/elf_file.cc | 56
-rw-r--r--  runtime/elf_file_impl.h | 6
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 90
-rw-r--r--  runtime/entrypoints_order_test.cc | 5
-rw-r--r--  runtime/gc/heap.cc | 14
-rw-r--r--  runtime/gc/space/image_space.cc | 56
-rw-r--r--  runtime/gc/space/image_space.h | 7
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/instrumentation.h | 7
-rw-r--r--  runtime/intern_table.cc | 156
-rw-r--r--  runtime/intern_table.h | 47
-rw-r--r--  runtime/interpreter/interpreter.cc | 90
-rw-r--r--  runtime/interpreter/interpreter.h | 5
-rw-r--r--  runtime/interpreter/interpreter_common.h | 9
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 40
-rw-r--r--  runtime/interpreter/mterp/Makefile_mterp | 49
-rw-r--r--  runtime/interpreter/mterp/README.txt | 197
-rw-r--r--  runtime/interpreter/mterp/arm/alt_stub.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm/bincmp.S | 36
-rw-r--r--  runtime/interpreter/mterp/arm/binop.S | 35
-rw-r--r--  runtime/interpreter/mterp/arm/binop2addr.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm/binopLit16.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm/binopLit8.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm/binopWide.S | 38
-rw-r--r--  runtime/interpreter/mterp/arm/binopWide2addr.S | 34
-rw-r--r--  runtime/interpreter/mterp/arm/entry.S | 64
-rw-r--r--  runtime/interpreter/mterp/arm/fallback.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm/fbinop.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm/fbinop2addr.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm/fbinopWide.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm/fbinopWide2addr.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/footer.S | 167
-rw-r--r--  runtime/interpreter/mterp/arm/funop.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm/funopNarrower.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm/funopWider.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm/header.S | 279
-rw-r--r--  runtime/interpreter/mterp/arm/invoke.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_add_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_object.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_wide.S | 24
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_and_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_object.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_aput_wide.S | 24
-rw-r--r--  runtime/interpreter/mterp/arm/op_array_length.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_check_cast.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm/op_cmp_long.S | 56
-rw-r--r--  runtime/interpreter/mterp/arm/op_cmpg_double.S | 34
-rw-r--r--  runtime/interpreter/mterp/arm/op_cmpg_float.S | 34
-rw-r--r--  runtime/interpreter/mterp/arm/op_cmpl_double.S | 34
-rw-r--r--  runtime/interpreter/mterp/arm/op_cmpl_float.S | 34
-rw-r--r--  runtime/interpreter/mterp/arm/op_const.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_16.S | 7
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_4.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_class.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_high16.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_string.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_string_jumbo.S | 15
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_16.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_32.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_high16.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_int.S | 30
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_int_2addr.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_int_lit16.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_int_lit8.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_div_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_double_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_double_to_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_double_to_long.S | 52
-rw-r--r--  runtime/interpreter/mterp/arm/op_fill_array_data.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_filled_new_array.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm/op_filled_new_array_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_float_to_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_float_to_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_float_to_long.S | 39
-rw-r--r--  runtime/interpreter/mterp/arm/op_goto.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm/op_goto_16.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm/op_goto_32.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_eq.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_eqz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_ge.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_gez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_gt.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_gtz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_le.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_lez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_lt.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_ltz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_ne.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_if_nez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget.S | 26
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_boolean_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_byte_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_char_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_object_quick.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_quick.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_short_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide_quick.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_instance_of.S | 24
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_int_to_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_direct.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_direct_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_interface.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_interface_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_static.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_static_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_super.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_super_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_virtual.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_virtual_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_boolean_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_byte_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_char_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_object.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_object_quick.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_quick.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_short_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_wide.S | 16
-rw-r--r--  runtime/interpreter/mterp/arm/op_iput_wide_quick.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_long_to_double.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm/op_long_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_long_to_int.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_monitor_enter.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_monitor_exit.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm/op_move.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_16.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_exception.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_from16.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_object_16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_object_from16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_result.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_result_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_result_wide.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide_16.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide_from16.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_int.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_int_2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_int_lit16.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_int_lit8.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_long.S | 36
-rw-r--r--  runtime/interpreter/mterp/arm/op_mul_long_2addr.S | 24
-rw-r--r--  runtime/interpreter/mterp/arm/op_neg_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_neg_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_neg_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_neg_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_new_array.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm/op_new_instance.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm/op_nop.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm/op_not_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_not_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_or_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_packed_switch.S | 39
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_double.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_double_2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_float.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_float_2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_int.S | 33
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_int_2addr.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_int_lit16.S | 31
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_int_lit8.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_long.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rem_long_2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_return.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm/op_return_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_return_void.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_return_void_no_barrier.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm/op_return_wide.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm/op_rsub_int.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/op_rsub_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_wide.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm/op_shl_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shl_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shl_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shl_long.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm/op_shl_long_2addr.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/op_shr_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shr_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shr_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_shr_long.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm/op_shr_long_2addr.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/op_sparse_switch.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_object.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sput_wide.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_sub_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_throw.S | 11
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_3e.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_3f.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_40.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_41.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_42.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_43.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_73.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_79.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_7a.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f3.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f4.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f5.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f6.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f7.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_f9.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fa.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fb.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fc.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fd.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_ushr_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_ushr_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_ushr_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_ushr_long.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm/op_ushr_long_2addr.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_xor_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/unop.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm/unopNarrower.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm/unopWide.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm/unopWider.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm/unused.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm/zcmp.S | 32
-rw-r--r--  runtime/interpreter/mterp/config_arm | 298
-rw-r--r--  runtime/interpreter/mterp/config_arm64 | 298
-rw-r--r--  runtime/interpreter/mterp/config_mips | 298
-rw-r--r--  runtime/interpreter/mterp/config_mips64 | 298
-rw-r--r--  runtime/interpreter/mterp/config_x86 | 298
-rw-r--r--  runtime/interpreter/mterp/config_x86_64 | 298
-rwxr-xr-x  runtime/interpreter/mterp/gen_mterp.py | 602
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 620
-rw-r--r--  runtime/interpreter/mterp/mterp.h | 37
-rw-r--r--  runtime/interpreter/mterp/mterp_stub.cc | 50
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S | 12202
-rwxr-xr-x  runtime/interpreter/mterp/rebuild.sh | 24
-rw-r--r--  runtime/jit/debugger_interface.cc | 63
-rw-r--r--  runtime/jit/debugger_interface.h | 17
-rw-r--r--  runtime/jit/jit.cc | 23
-rw-r--r--  runtime/jit/jit.h | 3
-rw-r--r--  runtime/jit/jit_code_cache.cc | 38
-rw-r--r--  runtime/jit/jit_code_cache.h | 13
-rw-r--r--  runtime/jit/offline_profiling_info.cc | 200
-rw-r--r--  runtime/jit/offline_profiling_info.h | 68
-rw-r--r--  runtime/jit/profile_saver.cc | 5
-rw-r--r--  runtime/jit/profile_saver.h | 1
-rw-r--r--  runtime/jni_env_ext.cc | 6
-rw-r--r--  runtime/jni_env_ext.h | 6
-rw-r--r--  runtime/jni_internal.cc | 240
-rw-r--r--  runtime/jni_internal.h | 1
-rw-r--r--  runtime/mem_map.cc | 64
-rw-r--r--  runtime/mem_map.h | 10
-rw-r--r--  runtime/mirror/class-inl.h | 1
-rw-r--r--  runtime/mirror/class.cc | 65
-rw-r--r--  runtime/mirror/class.h | 7
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 14
-rw-r--r--  runtime/native/java_lang_Class.cc | 14
-rw-r--r--  runtime/oat_file_assistant.cc | 1
-rw-r--r--  runtime/quick_exception_handler.cc | 20
-rw-r--r--  runtime/runtime.cc | 63
-rw-r--r--  runtime/safe_map.h | 2
-rw-r--r--  runtime/stack.cc | 3
-rw-r--r--  runtime/stack.h | 51
-rw-r--r--  runtime/stack_map.h | 29
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/thread.h | 48
-rw-r--r--  runtime/thread_list.cc | 36
-rw-r--r--  runtime/thread_list.h | 2
-rw-r--r--  runtime/utils.cc | 11
-rw-r--r--  runtime/utils.h | 18
-rw-r--r--  runtime/verifier/method_verifier.cc | 47
-rw-r--r--  runtime/verifier/method_verifier.h | 2
-rw-r--r--  runtime/verifier/reg_type.h | 30
-rw-r--r--  test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc | 59
-rw-r--r--  test/136-daemon-jni-shutdown/expected.txt | 5
-rw-r--r--  test/136-daemon-jni-shutdown/info.txt | 1
-rw-r--r--  test/136-daemon-jni-shutdown/src/Main.java | 47
-rwxr-xr-x  test/137-cfi/run | 2
-rwxr-xr-x  test/143-string-value/check | 20
-rw-r--r--  test/143-string-value/expected.txt | 1
-rw-r--r--  test/143-string-value/info.txt | 2
-rw-r--r--  test/143-string-value/src/Main.java | 26
-rw-r--r--  test/442-checker-constant-folding/src/Main.java | 13
-rw-r--r--  test/458-checker-instruction-simplification/src/Main.java | 16
-rw-r--r--  test/482-checker-loop-back-edge-use/src/Main.java | 2
-rw-r--r--  test/529-checker-unresolved/expected.txt | 3
-rw-r--r--  test/529-checker-unresolved/src/Main.java | 21
-rw-r--r--  test/561-divrem/expected.txt | 0
-rw-r--r--  test/561-divrem/info.txt | 2
-rw-r--r--  test/561-divrem/src/Main.java | 103
-rw-r--r--  test/561-shared-slowpaths/expected.txt | 1
-rw-r--r--  test/561-shared-slowpaths/info.txt | 1
-rw-r--r--  test/561-shared-slowpaths/src/Main.java | 154
-rw-r--r--  test/562-bce-preheader/expected.txt | 1
-rw-r--r--  test/562-bce-preheader/info.txt | 1
-rw-r--r--  test/562-bce-preheader/src/Main.java | 107
-rw-r--r--  test/562-no-intermediate/expected.txt | 0
-rw-r--r--  test/562-no-intermediate/info.txt | 2
-rw-r--r--  test/562-no-intermediate/src/Main.java | 27
-rw-r--r--  test/701-easy-div-rem/genMain.py | 10
-rwxr-xr-x  test/969-iface-super/build | 43
-rw-r--r--  test/969-iface-super/expected.txt | 47
-rw-r--r--  test/969-iface-super/info.txt | 6
-rwxr-xr-x  test/969-iface-super/run | 17
-rw-r--r--  test/969-iface-super/smali/A.smali | 28
-rw-r--r--  test/969-iface-super/smali/B.smali | 28
-rw-r--r--  test/969-iface-super/smali/C.smali | 41
-rw-r--r--  test/969-iface-super/smali/D.smali | 41
-rw-r--r--  test/969-iface-super/smali/E.smali | 41
-rw-r--r--  test/969-iface-super/smali/F.smali | 40
-rw-r--r--  test/969-iface-super/smali/G.smali | 53
-rw-r--r--  test/969-iface-super/smali/H.smali | 66
-rw-r--r--  test/969-iface-super/smali/classes.xml | 99
-rw-r--r--  test/969-iface-super/smali/iface.smali | 30
-rw-r--r--  test/969-iface-super/smali/iface2.smali | 36
-rw-r--r--  test/969-iface-super/smali/iface3.smali | 22
-rwxr-xr-x  test/970-iface-super-resolution-generated/build | 55
-rw-r--r--  test/970-iface-super-resolution-generated/expected.txt | 1
-rw-r--r--  test/970-iface-super-resolution-generated/info.txt | 17
-rwxr-xr-x  test/970-iface-super-resolution-generated/run | 17
-rwxr-xr-x  test/970-iface-super-resolution-generated/util-src/generate_java.py | 77
-rwxr-xr-x  test/970-iface-super-resolution-generated/util-src/generate_smali.py | 614
-rwxr-xr-x  test/971-iface-super-partial-compile-generated/build | 50
-rw-r--r--  test/971-iface-super-partial-compile-generated/expected.txt | 1
-rw-r--r--  test/971-iface-super-partial-compile-generated/info.txt | 17
-rwxr-xr-x  test/971-iface-super-partial-compile-generated/run | 17
-rwxr-xr-x  test/971-iface-super-partial-compile-generated/util-src/generate_java.py | 138
-rwxr-xr-x  test/971-iface-super-partial-compile-generated/util-src/generate_smali.py | 689
-rw-r--r--  test/Android.libarttest.mk | 1
-rw-r--r--  test/Android.run-test.mk | 58
-rwxr-xr-x  test/etc/run-test-jar | 4
-rwxr-xr-x  test/run-test | 7
-rwxr-xr-x  tools/buildbot-build.sh | 2
-rw-r--r--  tools/libcore_failures.txt | 58
-rw-r--r--  tools/libcore_failures_concurrent_collector.txt | 35
-rwxr-xr-x  tools/run-libcore-tests.sh | 8
499 files changed, 26085 insertions(+), 1886 deletions(-)
diff --git a/Android.mk b/Android.mk
index 34022ae1d0..4f73127123 100644
--- a/Android.mk
+++ b/Android.mk
@@ -400,6 +400,35 @@ oat-target-sync: oat-target
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
adb sync
+####################################################################################################
+# Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma.
+#
+# The library is required for starting a runtime in debug mode, but libartd cannot depend on it
+# directly (that would create a dependency cycle).
+#
+# Note: * Since the package is phony and exists only to create a dependency, its name is irrelevant.
+# * We set MULTILIB explicitly to "both" to state that we want both libraries on
+# 64-bit systems, even though that is the default.
+
+# ART on the host.
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := art-libartd-libopenjdkd-host-dependency
+LOCAL_MULTILIB := both
+LOCAL_REQUIRED_MODULES := libopenjdkd
+LOCAL_IS_HOST_MODULE := true
+include $(BUILD_PHONY_PACKAGE)
+endif
+
+# ART on the target.
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := art-libartd-libopenjdkd-target-dependency
+LOCAL_MULTILIB := both
+LOCAL_REQUIRED_MODULES := libopenjdkd
+include $(BUILD_PHONY_PACKAGE)
+endif
+
########################################################################
# "m build-art" for quick minimal build
.PHONY: build-art
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c9af1c67a4..ab7036717a 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -205,7 +205,7 @@ define build-art-test-dex
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true
endif
include $(BUILD_JAVA_LIBRARY)
$(5) := $$(LOCAL_INSTALLED_MODULE)
@@ -221,7 +221,7 @@ define build-art-test-dex
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+ LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
$(6) := $$(LOCAL_INSTALLED_MODULE)
diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk
index 953cfc091c..a06f45a4fb 100644
--- a/build/Android.cpplint.mk
+++ b/build/Android.cpplint.mk
@@ -16,10 +16,14 @@
include art/build/Android.common_build.mk
-ART_CPPLINT := art/tools/cpplint.py
+ART_CPPLINT := $(LOCAL_PATH)/tools/cpplint.py
ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf
ART_CPPLINT_FLAGS := --quiet
-ART_CPPLINT_SRC := $(shell find art -name "*.h" -o -name "*$(ART_CPP_EXTENSION)" | grep -v art/compiler/llvm/generated/ | grep -v art/runtime/elf\.h)
+# This:
+# 1) Gets a list of all .h & .cc files in the art directory.
+# 2) Prepends 'art/' to each of them to form the full path.
+# 3) Removes art/runtime/elf.h from the list.
+ART_CPPLINT_SRC := $(filter-out $(LOCAL_PATH)/runtime/elf.h, $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,*.h) $(call all-subdir-named-files,*$(ART_CPP_EXTENSION))))
# "mm cpplint-art" to verify we aren't regressing
.PHONY: cpplint-art
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 3d16c49fe4..af64470a17 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -441,7 +441,7 @@ define define-art-gtest-rule-target
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
- $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
@@ -485,7 +485,7 @@ define define-art-gtest-rule-host
# Dependencies for all host gtests.
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
$$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \
- $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$$(ART_HOST_SHLIB_EXTENSION) \
+ $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
diff --git a/compiler/Android.mk b/compiler/Android.mk
index f0bf4997c6..458973684e 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -108,7 +108,8 @@ LIBART_COMPILER_SRC_FILES := \
elf_writer_debug.cc \
elf_writer_quick.cc \
image_writer.cc \
- oat_writer.cc
+ oat_writer.cc \
+ profile_assistant.cc
LIBART_COMPILER_SRC_FILES_arm := \
dex/quick/arm/assemble_arm.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 278c49017e..b5fd1e074f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -208,8 +208,8 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSe
false,
timer_.get(),
-1,
- /* profile_file */ "",
- /* dex_to_oat_map */ nullptr));
+ /* dex_to_oat_map */ nullptr,
+ /* profile_compilation_info */ nullptr));
// We typically don't generate an image in unit tests, disable this optimization by default.
compiler_driver_->SetSupportBootImageFixup(false);
}
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index bcf20c7efa..12568a4ad4 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -92,7 +92,7 @@ class QuickCFITest : public CFITest {
false,
0,
-1,
- "",
+ nullptr,
nullptr);
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 3260a7a050..ebc9a2c9ea 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -520,7 +520,12 @@ static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set)
// In the rare cases we compile experimental opcodes, the runtime has an option to enable it,
// which will force scanning for any unsupported opcodes.
static bool SkipScanningUnsupportedOpcodes(InstructionSet instruction_set) {
- if (UNLIKELY(kUnsupportedOpcodesSize[instruction_set] == 0U)) {
+ Runtime* runtime = Runtime::Current();
+ if (UNLIKELY(runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods))) {
+ // Always need to scan opcodes if we have default methods since invoke-super for interface
+ // methods is never going to be supported in the quick compiler.
+ return false;
+ } else if (UNLIKELY(kUnsupportedOpcodesSize[instruction_set] == 0U)) {
// All opcodes are supported no matter what. Usually not the case
// since experimental opcodes are not implemented in the quick compiler.
return true;
@@ -538,8 +543,28 @@ static bool SkipScanningUnsupportedOpcodes(InstructionSet instruction_set) {
}
}
+bool QuickCompiler::CanCompileInstruction(const MIR* mir,
+ const DexFile& dex_file) const {
+ switch (mir->dalvikInsn.opcode) {
+ // The quick compiler does not support the new invoke-super semantics that target an
+ // interface method.
+ case Instruction::INVOKE_SUPER: // Fall-through
+ case Instruction::INVOKE_SUPER_RANGE: {
+ DCHECK(mir->dalvikInsn.IsInvoke());
+ uint32_t invoke_method_idx = mir->dalvikInsn.vB;
+ const DexFile::MethodId& method_id = dex_file.GetMethodId(invoke_method_idx);
+ const DexFile::ClassDef* class_def = dex_file.FindClassDef(method_id.class_idx_);
+ // Returns false if the class def is missing or the referenced class is an interface.
+ return class_def != nullptr && ((class_def->GetJavaAccessFlags() & kAccInterface) == 0);
+ }
+ default:
+ return true;
+ }
+}
+
// Skip methods that we do not currently support.
-bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
+bool QuickCompiler::CanCompileMethod(uint32_t method_idx,
+ const DexFile& dex_file,
CompilationUnit* cu) const {
// This is a limitation in mir_graph. See MirGraph::SetNumSSARegs.
if (cu->mir_graph->GetNumOfCodeAndTempVRs() > kMaxAllowedDalvikRegisters) {
@@ -580,6 +605,9 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
<< MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
}
return false;
+ } else if (!CanCompileInstruction(mir, dex_file)) {
+ VLOG(compiler) << "Cannot compile dalvik opcode : " << mir->dalvikInsn.opcode;
+ return false;
}
// Check if it invokes a prototype that we cannot support.
if (std::find(kInvokeOpcodes, kInvokeOpcodes + arraysize(kInvokeOpcodes), opcode)
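
For context, a minimal standalone sketch of the interface check introduced above, assuming only the dex-format access flag kAccInterface (0x0200); ClassDef and IsCompilableSuperTarget are hypothetical stand-ins for the real DexFile::ClassDef and CanCompileInstruction, not ART code:

#include <cstdint>

constexpr uint32_t kAccInterface = 0x0200;  // "declared as an interface" (dex spec)

struct ClassDef {
  uint32_t java_access_flags;
};

// Mirrors the new check: an invoke-super/invoke-super-range is compilable
// only if the referenced class resolves and is not an interface.
bool IsCompilableSuperTarget(const ClassDef* class_def) {
  return class_def != nullptr && (class_def->java_access_flags & kAccInterface) == 0;
}

int main() {
  ClassDef plain_class{0};
  ClassDef iface{kAccInterface};
  return (IsCompilableSuperTarget(&plain_class) && !IsCompilableSuperTarget(&iface) &&
          !IsCompilableSuperTarget(nullptr)) ? 0 : 1;
}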
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index d512b256cd..55f45f1ab0 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
#include "compiler.h"
+#include "dex/mir_graph.h"
namespace art {
@@ -74,6 +75,8 @@ class QuickCompiler : public Compiler {
explicit QuickCompiler(CompilerDriver* driver);
private:
+ bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file) const;
+
std::unique_ptr<PassManager> pre_opt_pass_manager_;
std::unique_ptr<PassManager> post_opt_pass_manager_;
DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 9deabc02e9..b39fe4da4f 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -73,7 +73,7 @@ class QuickAssembleX86TestBase : public testing::Test {
false,
0,
-1,
- "",
+ nullptr,
nullptr));
cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 84fb4324b5..f18fa67ea5 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -45,7 +45,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
false,
nullptr,
-1,
- "",
+ nullptr,
nullptr);
CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index afb4b71ccf..043bd93bd7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -347,8 +347,8 @@ CompilerDriver::CompilerDriver(
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file,
- const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map)
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+ const ProfileCompilationInfo* profile_compilation_info)
: compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
@@ -377,7 +377,8 @@ CompilerDriver::CompilerDriver(
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
dex_files_for_oat_file_(nullptr),
dex_file_oat_filename_map_(dex_to_oat_map),
- compiled_method_storage_(swap_fd) {
+ compiled_method_storage_(swap_fd),
+ profile_compilation_info_(profile_compilation_info) {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
@@ -385,12 +386,6 @@ CompilerDriver::CompilerDriver(
compiler_->Init();
CHECK_EQ(boot_image_, image_classes_.get() != nullptr);
-
- // Read the profile file if one is provided.
- if (!profile_file.empty()) {
- profile_compilation_info_.reset(new ProfileCompilationInfo(profile_file));
- LOG(INFO) << "Using profile data from file " << profile_file;
- }
}
CompilerDriver::~CompilerDriver() {
@@ -2306,15 +2301,11 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
- if (profile_compilation_info_ != nullptr) {
- if (!profile_compilation_info_->Load(dex_files)) {
- LOG(WARNING) << "Failed to load offline profile info from "
- << profile_compilation_info_->GetFilename()
- << ". No methods will be compiled";
- } else if (kDebugProfileGuidedCompilation) {
- LOG(INFO) << "[ProfileGuidedCompilation] "
- << profile_compilation_info_->DumpInfo();
- }
+ if (kDebugProfileGuidedCompilation) {
+ LOG(INFO) << "[ProfileGuidedCompilation] " <<
+ ((profile_compilation_info_ == nullptr)
+ ? "null"
+ : profile_compilation_info_->DumpInfo(&dex_files));
}
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index fa0cb9a412..3847c8183e 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -97,8 +97,8 @@ class CompilerDriver {
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file,
- const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map);
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
+ const ProfileCompilationInfo* profile_compilation_info);
~CompilerDriver();
@@ -657,9 +657,6 @@ class CompilerDriver {
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
- // Info for profile guided compilation.
- std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_;
-
bool had_hard_verifier_failure_;
size_t thread_count_;
@@ -689,6 +686,9 @@ class CompilerDriver {
CompiledMethodStorage compiled_method_storage_;
+ // Info for profile guided compilation.
+ const ProfileCompilationInfo* const profile_compilation_info_;
+
friend class CompileClassVisitor;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
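
Taken together, the compiler_driver.cc/.h hunks above change ownership: the driver no longer loads a profile file itself, it only borrows a const ProfileCompilationInfo* supplied (and owned) by the caller, with nullptr meaning no profile. A hedged sketch of the resulting lifetime contract, using Driver and ProfileInfo as hypothetical stand-in types:

#include <memory>

struct ProfileInfo {};  // stand-in for ProfileCompilationInfo

class Driver {  // stand-in for CompilerDriver
 public:
  // Borrowed pointer; nullptr simply disables profile-guided compilation.
  explicit Driver(const ProfileInfo* profile) : profile_(profile) {}

 private:
  const ProfileInfo* const profile_;  // owned by the caller, e.g. dex2oat
};

int main() {
  auto profile = std::make_unique<ProfileInfo>();  // the caller owns the profile
  Driver driver(profile.get());                    // the driver must not outlive it
  (void)driver;
  return 0;
}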
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 209bb5a3c2..385f34a9f9 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -211,11 +211,9 @@ bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usa
generate_debug_info_ = false;
} else if (option == "--debuggable") {
debuggable_ = true;
- generate_debug_info_ = true;
} else if (option == "--native-debuggable") {
native_debuggable_ = true;
debuggable_ = true;
- generate_debug_info_ = true;
} else if (option.starts_with("--top-k-profile-threshold=")) {
ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
} else if (option == "--include-patch-information") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index f8032bb514..f14bdc4a2f 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -50,7 +50,7 @@ class CompilerOptions FINAL {
static const size_t kDefaultNumDexMethodsThreshold = 900;
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultNativeDebuggable = false;
- static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
+ static const bool kDefaultGenerateDebugInfo = false;
static const bool kDefaultIncludePatchInformation = false;
static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 32;
diff --git a/compiler/dwarf/method_debug_info.h b/compiler/dwarf/method_debug_info.h
index a391e4d08a..e8ba9148e8 100644
--- a/compiler/dwarf/method_debug_info.h
+++ b/compiler/dwarf/method_debug_info.h
@@ -30,8 +30,8 @@ struct MethodDebugInfo {
uint32_t access_flags_;
const DexFile::CodeItem* code_item_;
bool deduped_;
- uint32_t low_pc_;
- uint32_t high_pc_;
+ uintptr_t low_pc_;
+ uintptr_t high_pc_;
CompiledMethod* compiled_method_;
};
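
low_pc_/high_pc_ widen to uintptr_t because JIT-compiled code sits at absolute runtime addresses that may not fit in 32 bits; the DWARF writer later in this change still narrows the method *size* to 32 bits via dchecked_integral_cast. A sketch of that checked narrowing, with checked_narrow as a hypothetical stand-in for art's dchecked_integral_cast (unsigned types assumed):

#include <cassert>
#include <cstdint>
#include <limits>

// Narrow an unsigned value, asserting (in debug builds) that it fits.
template <typename Dst, typename Src>
Dst checked_narrow(Src value) {
  assert(value <= static_cast<Src>(std::numeric_limits<Dst>::max()));
  return static_cast<Dst>(value);
}

int main() {
  // With the JIT these can be absolute addresses anywhere in a 64-bit space,
  // but the size of a single method still fits comfortably in 32 bits.
  const uintptr_t low_pc = 0x1000;
  const uintptr_t high_pc = 0x1800;
  const uint32_t method_size = checked_narrow<uint32_t>(high_pc - low_pc);
  return method_size == 0x800 ? 0 : 1;
}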
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index bb07cc2913..a7461a5525 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -148,6 +148,12 @@ class ElfBuilder FINAL {
}
}
+ // Returns true if the section was written to disk.
+ // (Used to check whether we have .text when writing JIT debug info)
+ bool Exists() const {
+ return finished_;
+ }
+
// Get the location of this section in virtual memory.
Elf_Addr GetAddress() const {
CHECK(started_);
@@ -247,16 +253,18 @@ class ElfBuilder FINAL {
}
// Buffer symbol for this section. It will be written later.
+ // If the symbol's section is null, it will be considered absolute (SHN_ABS).
+ // (We use this for JIT debug info to reference code stored outside the debug ELF file.)
void Add(Elf_Word name, const Section* section,
Elf_Addr addr, bool is_relative, Elf_Word size,
uint8_t binding, uint8_t type, uint8_t other = 0) {
- CHECK(section != nullptr);
Elf_Sym sym = Elf_Sym();
sym.st_name = name;
sym.st_value = addr + (is_relative ? section->GetAddress() : 0);
sym.st_size = size;
sym.st_other = other;
- sym.st_shndx = section->GetSectionIndex();
+ sym.st_shndx = (section != nullptr ? section->GetSectionIndex()
+ : static_cast<Elf_Word>(SHN_ABS));
sym.st_info = (binding << 4) + (type & 0xf);
symbols_.push_back(sym);
}
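
The relaxed Add() above is what lets JIT debug info emit symbols for code living outside the ELF file being written: a null section yields an absolute symbol. A self-contained sketch of just that st_shndx decision (Section is a stand-in; SHN_ABS is the standard ELF value 0xfff1):

#include <cstdint>

constexpr uint16_t SHN_ABS = 0xfff1;  // standard ELF "absolute symbol" index

struct Section {
  uint16_t section_index;
};

// Mirrors the new Add() behaviour: a null section marks the symbol absolute,
// so its st_value is taken as-is rather than relative to any section.
uint16_t SymbolSectionIndex(const Section* section) {
  return section != nullptr ? section->section_index : SHN_ABS;
}

int main() {
  Section text{1};
  return (SymbolSectionIndex(&text) == 1 &&
          SymbolSectionIndex(nullptr) == SHN_ABS) ? 0 : 1;
}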
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 2bc8c89f73..dd50f69b71 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -22,16 +22,20 @@
#include "base/casts.h"
#include "base/stl_util.h"
#include "compiled_method.h"
-#include "driver/compiler_driver.h"
#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
#include "dwarf/dedup_vector.h"
#include "dwarf/headers.h"
#include "dwarf/method_debug_info.h"
#include "dwarf/register.h"
#include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
#include "oat_writer.h"
-#include "utils.h"
#include "stack_map.h"
+#include "utils.h"
namespace art {
namespace dwarf {
@@ -219,6 +223,10 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder,
CHECK(format == DW_DEBUG_FRAME_FORMAT || format == DW_EH_FRAME_FORMAT);
typedef typename ElfTypes::Addr Elf_Addr;
+ if (method_infos.empty()) {
+ return;
+ }
+
std::vector<uint32_t> binary_search_table;
std::vector<uintptr_t> patch_locations;
if (format == DW_EH_FRAME_FORMAT) {
@@ -234,7 +242,9 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder,
{
cfi_section->Start();
const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
- const Elf_Addr text_address = builder->GetText()->GetAddress();
+ const Elf_Addr text_address = builder->GetText()->Exists()
+ ? builder->GetText()->GetAddress()
+ : 0;
const Elf_Addr cfi_address = cfi_section->GetAddress();
const Elf_Addr cie_address = cfi_address;
Elf_Addr buffer_address = cfi_address;
@@ -305,8 +315,8 @@ namespace {
struct CompilationUnit {
std::vector<const MethodDebugInfo*> methods_;
size_t debug_line_offset_ = 0;
- uint32_t low_pc_ = 0xFFFFFFFFU;
- uint32_t high_pc_ = 0;
+ uintptr_t low_pc_ = std::numeric_limits<uintptr_t>::max();
+ uintptr_t high_pc_ = 0;
};
typedef std::vector<DexFile::LocalInfo> LocalInfos;
@@ -439,14 +449,17 @@ class DebugInfoWriter {
void Write(const CompilationUnit& compilation_unit) {
CHECK(!compilation_unit.methods_.empty());
- const Elf_Addr text_address = owner_->builder_->GetText()->GetAddress();
+ const Elf_Addr text_address = owner_->builder_->GetText()->Exists()
+ ? owner_->builder_->GetText()->GetAddress()
+ : 0;
+ const uintptr_t cu_size = compilation_unit.high_pc_ - compilation_unit.low_pc_;
info_.StartTag(DW_TAG_compile_unit);
info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
info_.WriteData1(DW_AT_language, DW_LANG_Java);
info_.WriteStrp(DW_AT_comp_dir, owner_->WriteString("$JAVA_SRC_ROOT"));
info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc_);
- info_.WriteUdata(DW_AT_high_pc, compilation_unit.high_pc_ - compilation_unit.low_pc_);
+ info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size));
info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset_);
const char* last_dex_class_desc = nullptr;
@@ -464,8 +477,16 @@ class DebugInfoWriter {
if (last_dex_class_desc != nullptr) {
EndClassTag(last_dex_class_desc);
}
- size_t offset = StartClassTag(dex_class_desc);
- type_cache_.emplace(dex_class_desc, offset);
+ // Write reference tag for the class we are about to declare.
+ size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
+ type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
+ size_t type_attrib_offset = info_.size();
+ info_.WriteRef4(DW_AT_type, 0);
+ info_.EndTag();
+ // Declare the class that owns this method.
+ size_t class_offset = StartClassTag(dex_class_desc);
+ info_.UpdateUint32(type_attrib_offset, class_offset);
+ info_.WriteFlag(DW_AT_declaration, true);
// Check that each class is defined only once.
bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
CHECK(unique) << "Redefinition of " << dex_class_desc;
@@ -476,7 +497,7 @@ class DebugInfoWriter {
info_.StartTag(DW_TAG_subprogram);
WriteName(dex->GetMethodName(dex_method));
info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc_);
- info_.WriteUdata(DW_AT_high_pc, mi->high_pc_ - mi->low_pc_);
+ info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc_-mi->low_pc_));
uint8_t frame_base[] = { DW_OP_call_frame_cfa };
info_.WriteExprLoc(DW_AT_frame_base, &frame_base, sizeof(frame_base));
WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
@@ -562,6 +583,92 @@ class DebugInfoWriter {
owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
}
+ void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ info_.StartTag(DW_TAG_compile_unit);
+ info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
+ info_.WriteData1(DW_AT_language, DW_LANG_Java);
+
+ for (mirror::Class* type : types) {
+ if (type->IsPrimitive()) {
+ // For primitive types the definition and the declaration is the same.
+ if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
+ WriteTypeDeclaration(type->GetDescriptor(nullptr));
+ }
+ } else if (type->IsArrayClass()) {
+ mirror::Class* element_type = type->GetComponentType();
+ uint32_t component_size = type->GetComponentSize();
+ uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+ uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ info_.StartTag(DW_TAG_array_type);
+ std::string descriptor_string;
+ WriteLazyType(element_type->GetDescriptor(&descriptor_string));
+ info_.WriteUdata(DW_AT_data_member_location, data_offset);
+ info_.StartTag(DW_TAG_subrange_type);
+ DCHECK_LT(length_offset, 32u);
+ uint8_t count[] = {
+ DW_OP_push_object_address,
+ static_cast<uint8_t>(DW_OP_lit0 + length_offset),
+ DW_OP_plus,
+ DW_OP_deref_size,
+ 4 // Array length is always 32-bit wide.
+ };
+ info_.WriteExprLoc(DW_AT_count, &count, sizeof(count));
+ info_.EndTag(); // DW_TAG_subrange_type.
+ info_.EndTag(); // DW_TAG_array_type.
+ } else {
+ std::string descriptor_string;
+ const char* desc = type->GetDescriptor(&descriptor_string);
+ StartClassTag(desc);
+
+ if (!type->IsVariableSize()) {
+ info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
+ }
+
+ // Base class.
+ mirror::Class* base_class = type->GetSuperClass();
+ if (base_class != nullptr) {
+ info_.StartTag(DW_TAG_inheritance);
+ WriteLazyType(base_class->GetDescriptor(&descriptor_string));
+ info_.WriteUdata(DW_AT_data_member_location, 0);
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+ info_.EndTag(); // DW_TAG_inheritance.
+ }
+
+ // Member variables.
+ for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+ ArtField* field = type->GetInstanceField(i);
+ info_.StartTag(DW_TAG_member);
+ WriteName(field->GetName());
+ WriteLazyType(field->GetTypeDescriptor());
+ info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+ uint32_t access_flags = field->GetAccessFlags();
+ if (access_flags & kAccPublic) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+ } else if (access_flags & kAccProtected) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+ } else if (access_flags & kAccPrivate) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+ }
+ info_.EndTag(); // DW_TAG_member.
+ }
+
+ EndClassTag(desc);
+ }
+ }
+
+ CHECK_EQ(info_.Depth(), 1);
+ FinishLazyTypes();
+ info_.EndTag(); // DW_TAG_compile_unit.
+ std::vector<uint8_t> buffer;
+ buffer.reserve(info_.data()->size() + KB);
+ const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+ const size_t debug_abbrev_offset =
+ owner_->debug_abbrev_.Insert(debug_abbrev_.data(), debug_abbrev_.size());
+ WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+ owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+ }
+
// Write table into .debug_loc which describes location of dex register.
// The dex register might be valid only at some points and it might
// move between machine registers and stack.
@@ -715,14 +822,14 @@ class DebugInfoWriter {
// just define all types lazily at the end of compilation unit.
void WriteLazyType(const char* type_descriptor) {
if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
- lazy_types_.emplace(type_descriptor, info_.size());
+ lazy_types_.emplace(std::string(type_descriptor), info_.size());
info_.WriteRef4(DW_AT_type, 0);
}
}
void FinishLazyTypes() {
for (const auto& lazy_type : lazy_types_) {
- info_.UpdateUint32(lazy_type.second, WriteType(lazy_type.first));
+ info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
}
lazy_types_.clear();
}
@@ -747,30 +854,39 @@ class DebugInfoWriter {
// Convert dex type descriptor to DWARF.
// Returns offset in the compilation unit.
- size_t WriteType(const char* desc) {
+ size_t WriteTypeDeclaration(const std::string& desc) {
+ DCHECK(!desc.empty());
const auto& it = type_cache_.find(desc);
if (it != type_cache_.end()) {
return it->second;
}
size_t offset;
- if (*desc == 'L') {
+ if (desc[0] == 'L') {
// Class type. For example: Lpackage/name;
- offset = StartClassTag(desc);
+ size_t class_offset = StartClassTag(desc.c_str());
info_.WriteFlag(DW_AT_declaration, true);
- EndClassTag(desc);
- } else if (*desc == '[') {
+ EndClassTag(desc.c_str());
+ // Reference to the class type.
+ offset = info_.StartTag(DW_TAG_reference_type);
+ info_.WriteRef(DW_AT_type, class_offset);
+ info_.EndTag();
+ } else if (desc[0] == '[') {
// Array type.
- size_t element_type = WriteType(desc + 1);
- offset = info_.StartTag(DW_TAG_array_type);
+ size_t element_type = WriteTypeDeclaration(desc.substr(1));
+ size_t array_type = info_.StartTag(DW_TAG_array_type);
+ info_.WriteFlag(DW_AT_declaration, true);
info_.WriteRef(DW_AT_type, element_type);
info_.EndTag();
+ offset = info_.StartTag(DW_TAG_reference_type);
+ info_.WriteRef4(DW_AT_type, array_type);
+ info_.EndTag();
} else {
// Primitive types.
const char* name;
uint32_t encoding;
uint32_t byte_size;
- switch (*desc) {
+ switch (desc[0]) {
case 'B':
name = "byte";
encoding = DW_ATE_signed;
@@ -815,7 +931,7 @@ class DebugInfoWriter {
LOG(FATAL) << "Void type should not be encoded";
UNREACHABLE();
default:
- LOG(FATAL) << "Unknown dex type descriptor: " << desc;
+ LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
UNREACHABLE();
}
offset = info_.StartTag(DW_TAG_base_type);
@@ -865,9 +981,10 @@ class DebugInfoWriter {
// Temporary buffer to create and store the entries.
DebugInfoEntryWriter<> info_;
// Cache of already translated type descriptors.
- std::map<const char*, size_t, CStringLess> type_cache_; // type_desc -> definition_offset.
+ std::map<std::string, size_t> type_cache_; // type_desc -> definition_offset.
// 32-bit references which need to be resolved to a type later.
- std::multimap<const char*, size_t, CStringLess> lazy_types_; // type_desc -> patch_offset.
+ // A given type may be used multiple times, therefore we need a multimap.
+ std::multimap<std::string, size_t> lazy_types_; // type_desc -> patch_offset.
};
public:
@@ -883,6 +1000,11 @@ class DebugInfoWriter {
writer.Write(compilation_unit);
}
+ void WriteTypes(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ CompilationUnitWriter writer(this);
+ writer.Write(types);
+ }
+
void End() {
builder_->GetDebugInfo()->End();
builder_->WritePatches(".debug_info.oat_patches",
@@ -924,7 +1046,9 @@ class DebugLineWriter {
// Returns the number of bytes written.
size_t WriteCompilationUnit(CompilationUnit& compilation_unit) {
const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
- const Elf_Addr text_address = builder_->GetText()->GetAddress();
+ const Elf_Addr text_address = builder_->GetText()->Exists()
+ ? builder_->GetText()->GetAddress()
+ : 0;
compilation_unit.debug_line_offset_ = builder_->GetDebugLine()->GetSize();
@@ -1102,9 +1226,27 @@ class DebugLineWriter {
std::vector<uintptr_t> debug_line_patches;
};
+// Get all types loaded by the runtime.
+static std::vector<mirror::Class*> GetLoadedRuntimeTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<mirror::Class*> result;
+ class CollectClasses : public ClassVisitor {
+ public:
+ virtual bool Visit(mirror::Class* klass) {
+ classes_->push_back(klass);
+ return true;
+ }
+ std::vector<mirror::Class*>* classes_;
+ };
+ CollectClasses visitor;
+ visitor.classes_ = &result;
+ Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ return result;
+}
+
template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
+ const ArrayRef<const MethodDebugInfo>& method_infos) {
// Group the methods into compilation units based on source file.
std::vector<CompilationUnit> compilation_units;
const char* last_source_file = nullptr;
@@ -1122,7 +1264,7 @@ void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
}
// Write .debug_line section.
- {
+ if (!compilation_units.empty()) {
DebugLineWriter<ElfTypes> line_writer(builder);
line_writer.Start();
for (auto& compilation_unit : compilation_units) {
@@ -1132,12 +1274,19 @@ void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
}
// Write .debug_info section.
- {
+ if (!compilation_units.empty() || write_loaded_runtime_types) {
DebugInfoWriter<ElfTypes> info_writer(builder);
info_writer.Start();
for (const auto& compilation_unit : compilation_units) {
info_writer.WriteCompilationUnit(compilation_unit);
}
+ if (write_loaded_runtime_types) {
+ Thread* self = Thread::Current();
+      // The lock prevents the classes from being moved by the GC.
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ std::vector<mirror::Class*> types = GetLoadedRuntimeTypes();
+ info_writer.WriteTypes(ArrayRef<mirror::Class*>(types.data(), types.size()));
+ }
info_writer.End();
}
}
@@ -1173,11 +1322,13 @@ void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
name += " [DEDUPED]";
}
+ const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr;
+ const bool is_relative = (text != nullptr);
uint32_t low_pc = info.low_pc_;
// Add in code delta, e.g., thumb bit 0 for Thumb2 code.
low_pc += info.compiled_method_->CodeDelta();
- symtab->Add(strtab->Write(name), builder->GetText(), low_pc,
- true, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
+ symtab->Add(strtab->Write(name), text, low_pc,
+ is_relative, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
  // Conforming to AAELF, add a $t mapping symbol to indicate the start of a sequence of thumb2
// instructions, so that disassembler tools can correctly disassemble.
@@ -1185,8 +1336,8 @@ void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
// requires it to match function symbol. Just address 0 does not work.
if (info.compiled_method_->GetInstructionSet() == kThumb2) {
if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
- symtab->Add(strtab->Write("$t"), builder->GetText(), info.low_pc_ & ~1,
- true, 0, STB_LOCAL, STT_NOTYPE);
+ symtab->Add(strtab->Write("$t"), text, info.low_pc_ & ~1,
+ is_relative, 0, STB_LOCAL, STT_NOTYPE);
generated_mapping_symbol = true;
}
}
@@ -1202,25 +1353,89 @@ void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format) {
- if (!method_infos.empty()) {
- // Add methods to .symtab.
- WriteDebugSymbols(builder, method_infos);
- // Generate CFI (stack unwinding information).
- WriteCFISection(builder, method_infos, cfi_format);
- // Write DWARF .debug_* sections.
- WriteDebugSections(builder, method_infos);
+ // Add methods to .symtab.
+ WriteDebugSymbols(builder, method_infos);
+ // Generate CFI (stack unwinding information).
+ WriteCFISection(builder, method_infos, cfi_format);
+ // Write DWARF .debug_* sections.
+ WriteDebugSections(builder, write_loaded_runtime_types, method_infos);
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal(
+ const dwarf::MethodDebugInfo& method_info) {
+ const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+ std::vector<uint8_t> buffer;
+ buffer.reserve(KB);
+ VectorOutputStream out("Debug ELF file", &buffer);
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+ builder->Start();
+ WriteDebugInfo(builder.get(),
+ false,
+ ArrayRef<const MethodDebugInfo>(&method_info, 1),
+ DW_DEBUG_FRAME_FORMAT);
+ builder->End();
+ CHECK(builder->Good());
+ // Make a copy of the buffer. We want to shrink it anyway.
+ uint8_t* result = new uint8_t[buffer.size()];
+ CHECK(result != nullptr);
+ memcpy(result, buffer.data(), buffer.size());
+ return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info) {
+ const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+ if (Is64BitInstructionSet(isa)) {
+ return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info);
+ } else {
+ return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info);
+ }
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForClassInternal(const InstructionSet isa,
+ mirror::Class* type)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<uint8_t> buffer;
+ buffer.reserve(KB);
+ VectorOutputStream out("Debug ELF file", &buffer);
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+ builder->Start();
+
+ DebugInfoWriter<ElfTypes> info_writer(builder.get());
+ info_writer.Start();
+ info_writer.WriteTypes(ArrayRef<mirror::Class*>(&type, 1));
+ info_writer.End();
+
+ builder->End();
+ CHECK(builder->Good());
+ // Make a copy of the buffer. We want to shrink it anyway.
+ uint8_t* result = new uint8_t[buffer.size()];
+ CHECK(result != nullptr);
+ memcpy(result, buffer.data(), buffer.size());
+ return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type) {
+ if (Is64BitInstructionSet(isa)) {
+ return WriteDebugElfFileForClassInternal<ElfTypes64>(isa, type);
+ } else {
+ return WriteDebugElfFileForClassInternal<ElfTypes32>(isa, type);
}
}
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
ElfBuilder<ElfTypes32>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
template void WriteDebugInfo<ElfTypes64>(
ElfBuilder<ElfTypes64>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
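
Editorial note: the switch of type_cache_ and lazy_types_ from const char* keys to std::string
keys is what makes descriptors built from temporaries safe to cache (for example
WriteTypeDeclaration(desc.substr(1)) for array element types). As a minimal, self-contained
sketch of the lazy-reference idea — all names below are illustrative stand-ins, not the real
DWARF writer API:

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Illustrative stand-in for the .debug_info buffer: 32-bit type references
    // are emitted as zero placeholders and patched once the type DIE is written.
    struct LazyTypeRefs {
      std::vector<uint32_t> words;                   // emitted reference slots
      std::map<std::string, size_t> type_offsets;    // descriptor -> DIE offset
      std::multimap<std::string, size_t> lazy_refs;  // descriptor -> slot index

      void WriteLazyRef(const std::string& desc) {
        lazy_refs.emplace(desc, words.size());
        words.push_back(0u);  // placeholder, patched below
      }
      void DefineType(const std::string& desc, size_t die_offset) {
        type_offsets[desc] = die_offset;
      }
      void PatchAll() {
        for (const auto& ref : lazy_refs) {
          words[ref.second] = static_cast<uint32_t>(type_offsets.at(ref.first));
        }
      }
    };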
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 7ec0be185a..91da00f97a 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -17,19 +17,30 @@
#ifndef ART_COMPILER_ELF_WRITER_DEBUG_H_
#define ART_COMPILER_ELF_WRITER_DEBUG_H_
-#include "elf_builder.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "dwarf/dwarf_constants.h"
-#include "oat_writer.h"
+#include "elf_builder.h"
#include "utils/array_ref.h"
namespace art {
+namespace mirror {
+class Class;
+}
namespace dwarf {
+struct MethodDebugInfo;
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info);
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
} // namespace dwarf
} // namespace art
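
Editorial note on ownership for the two new entry points: as the .cc changes above show, the
returned ArrayRef wraps a buffer allocated with new uint8_t[], so the caller takes ownership
and must eventually free it. A hypothetical consumer (RegisterJitEntry is made up for
illustration):

    // Sketch only; assumes the caller holds the mutator lock where required.
    ArrayRef<const uint8_t> elf = dwarf::WriteDebugElfFileForMethod(method_info);
    RegisterJitEntry(elf.data(), elf.size());  // hypothetical consumer
    delete[] elf.data();                       // buffer came from new uint8_t[]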
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 7b1bdd72e5..a67f3bd1a9 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -152,7 +152,7 @@ template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
if (compiler_options_->GetGenerateDebugInfo()) {
- dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
+ dwarf::WriteDebugInfo(builder_.get(), /* write_types */ true, method_infos, kCFIFormat);
}
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 17d0f61a34..d0bb201d69 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -76,23 +76,35 @@ static constexpr bool kBinObjects = true;
// Return true if an object is already in an image space.
bool ImageWriter::IsInBootImage(const void* obj) const {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
if (!compile_app_image_) {
- DCHECK(boot_image_space_ == nullptr);
+ DCHECK(heap->GetBootImageSpaces().empty());
return false;
}
- const uint8_t* image_begin = boot_image_space_->Begin();
- // Real image end including ArtMethods and ArtField sections.
- const uint8_t* image_end = image_begin + boot_image_space_->GetImageHeader().GetImageSize();
- return image_begin <= obj && obj < image_end;
+ for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) {
+ const uint8_t* image_begin = boot_image_space->Begin();
+ // Real image end including ArtMethods and ArtField sections.
+ const uint8_t* image_end = image_begin + boot_image_space->GetImageHeader().GetImageSize();
+ if (image_begin <= obj && obj < image_end) {
+ return true;
+ }
+ }
+ return false;
}
bool ImageWriter::IsInBootOatFile(const void* ptr) const {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
if (!compile_app_image_) {
- DCHECK(boot_image_space_ == nullptr);
+ DCHECK(heap->GetBootImageSpaces().empty());
return false;
}
- const ImageHeader& image_header = boot_image_space_->GetImageHeader();
- return image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd();
+ for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) {
+ const ImageHeader& image_header = boot_image_space->GetImageHeader();
+ if (image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd()) {
+ return true;
+ }
+ }
+ return false;
}
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
@@ -109,14 +121,6 @@ static void CheckNoDexObjects() {
bool ImageWriter::PrepareImageAddressSpace() {
target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
gc::Heap* const heap = Runtime::Current()->GetHeap();
- // Cache boot image space.
- for (gc::space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- CHECK(compile_app_image_);
- CHECK(boot_image_space_ == nullptr) << "Multiple image spaces";
- boot_image_space_ = space->AsImageSpace();
- }
- }
{
ScopedObjectAccess soa(Thread::Current());
PruneNonImageClasses(); // Remove junk
@@ -205,9 +209,6 @@ bool ImageWriter::Write(int image_fd,
oat_header.GetQuickResolutionTrampolineOffset();
image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
oat_header.GetQuickToInterpreterBridgeOffset();
- } else {
- // Other oat files use the primary trampolines.
- // TODO: Dummy values to protect usage? b/26317072
}
@@ -635,11 +636,11 @@ ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const
bool ImageWriter::AllocMemory() {
for (const char* oat_filename : oat_filenames_) {
ImageInfo& image_info = GetImageInfo(oat_filename);
- const size_t length = RoundUp(image_objects_offset_begin_ +
- GetBinSizeSum(image_info) +
- intern_table_bytes_ +
- class_table_bytes_,
- kPageSize);
+ ImageSection unused_sections[ImageHeader::kSectionCount];
+ const size_t length = RoundUp(
+ image_info.CreateImageSections(target_ptr_size_, unused_sections),
+ kPageSize);
+
std::string error_msg;
image_info.image_.reset(MemMap::MapAnonymous("image writer image",
nullptr,
@@ -909,14 +910,17 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
DCHECK(obj != nullptr);
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
+ const char* oat_filename = GetOatFilename(obj);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+
// we must be an interned string that was forward referenced and already assigned
if (IsImageBinSlotAssigned(obj)) {
- DCHECK_EQ(obj, obj->AsString()->Intern());
+ DCHECK_EQ(obj, image_info.intern_table_->InternStrongImageString(obj->AsString()));
return;
}
// InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
// we are guaranteed to not have GC during image writing.
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString(
+ mirror::String* const interned = image_info.intern_table_->InternStrongImageString(
obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
@@ -1067,6 +1071,13 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
};
const char* oat_file = GetOatFilenameForDexCache(dex_cache);
ImageInfo& image_info = GetImageInfo(oat_file);
+ {
+ // Note: This table is only accessed from the image writer, so the lock is technically
+ // unnecessary.
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ // Insert in the class table for this iamge.
+ image_info.class_table_->Insert(as_klass);
+ }
for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
// Total array length including header.
if (cur_fields != nullptr) {
@@ -1249,6 +1260,18 @@ void ImageWriter::CalculateNewObjectOffsets() {
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
+ // Calculate the sizes of the intern tables and class tables.
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ // Calculate how big the intern table will be after being serialized.
+ InternTable* const intern_table = image_info.intern_table_.get();
+ CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ // Calculate the size of the class table.
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ }
+
// Calculate bin slot offsets.
for (const char* oat_filename : oat_filenames_) {
ImageInfo& image_info = GetImageInfo(oat_filename);
@@ -1275,18 +1298,11 @@ void ImageWriter::CalculateNewObjectOffsets() {
ImageInfo& image_info = GetImageInfo(oat_filename);
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
- size_t native_sections_size = image_info.bin_slot_sizes_[kBinArtField] +
- image_info.bin_slot_sizes_[kBinArtMethodDirty] +
- image_info.bin_slot_sizes_[kBinArtMethodClean] +
- image_info.bin_slot_sizes_[kBinDexCacheArray] +
- intern_table_bytes_ +
- class_table_bytes_;
- size_t image_objects = RoundUp(image_info.image_end_, kPageSize);
- size_t bitmap_size =
- RoundUp(gc::accounting::ContinuousSpaceBitmap::ComputeBitmapSize(image_objects), kPageSize);
- size_t heap_size = gc::accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_size);
- size_t max = std::max(heap_size, image_info.image_end_ + native_sections_size + bitmap_size);
- image_info.image_size_ = RoundUp(max, kPageSize);
+ ImageSection unused_sections[ImageHeader::kSectionCount];
+ image_info.image_size_ = RoundUp(
+ image_info.CreateImageSections(target_ptr_size_, unused_sections),
+ kPageSize);
+ // There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1310,89 +1326,69 @@ void ImageWriter::CalculateNewObjectOffsets() {
relocation.offset += image_info.bin_slot_offsets_[bin_type];
}
- /* TODO: Reenable the intern table and class table. b/26317072
- // Calculate how big the intern table will be after being serialized.
- InternTable* const intern_table = runtime->GetInternTable();
- CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
-
- // Write out the class table.
- ClassLinker* class_linker = runtime->GetClassLinker();
- if (boot_image_space_ == nullptr) {
- // Compiling the boot image, add null class loader.
- class_loaders_.insert(nullptr);
- }
- // class_loaders_ usually will not be empty, but may be empty if we attempt to create an image
- // with no classes.
- if (class_loaders_.size() == 1u) {
- // Only write the class table if we have exactly one class loader. There may be cases where
- // there are multiple class loaders if a class path is passed to dex2oat.
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (mirror::ClassLoader* loader : class_loaders_) {
- ClassTable* table = class_linker->ClassTableForClassLoader(loader);
- CHECK(table != nullptr);
- class_table_bytes_ += table->WriteToMemory(nullptr);
- }
- }
- */
-
  // Note that image_info.image_end_ is left at the end of the used mirror object section.
}
-void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
- CHECK_NE(0U, oat_loaded_size);
- const char* oat_filename = oat_file_->GetLocation().c_str();
- ImageInfo& image_info = GetImageInfo(oat_filename);
- const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename);
- const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
- image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset;
- const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size();
- image_info.oat_size_ = oat_file_->Size();
-
- // Create the image sections.
- ImageSection sections[ImageHeader::kSectionCount];
+size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
+ ImageSection* out_sections) const {
+ DCHECK(out_sections != nullptr);
// Objects section
- auto* objects_section = &sections[ImageHeader::kSectionObjects];
- *objects_section = ImageSection(0u, image_info.image_end_);
+ auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
+ *objects_section = ImageSection(0u, image_end_);
size_t cur_pos = objects_section->End();
// Add field section.
- auto* field_section = &sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinArtField], field_section->Offset());
+ auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
cur_pos = field_section->End();
  // Round up to the alignment required by the method section.
- cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_));
+ cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
// Add method section.
- auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
+ auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
*methods_section = ImageSection(cur_pos,
- image_info.bin_slot_sizes_[kBinArtMethodClean] +
- image_info.bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
+ bin_slot_sizes_[kBinArtMethodClean] +
+ bin_slot_sizes_[kBinArtMethodDirty]);
+ CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Add dex cache arrays section.
- auto* dex_cache_arrays_section = &sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(image_info.bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
+ auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
+ CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
cur_pos = dex_cache_arrays_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the interned strings.
- auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
+ auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
cur_pos = interned_strings_section->End();
// Round up to the alignment the class table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the class table section.
- auto* class_table_section = &sections[ImageHeader::kSectionClassTable];
+ auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
*class_table_section = ImageSection(cur_pos, class_table_bytes_);
cur_pos = class_table_section->End();
// Image end goes right before the start of the image bitmap.
- const size_t image_end = static_cast<uint32_t>(cur_pos);
+ return cur_pos;
+}
+
+void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
+ CHECK_NE(0U, oat_loaded_size);
+ const char* oat_filename = oat_file_->GetLocation().c_str();
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename);
+ const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
+ image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset;
+ const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size();
+ image_info.oat_size_ = oat_file_->Size();
+
+ // Create the image sections.
+ ImageSection sections[ImageHeader::kSectionCount];
+ const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
+
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
- *bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize));
- cur_pos = bitmap_section->End();
+ *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
if (VLOG_IS_ON(compiler)) {
LOG(INFO) << "Creating header for " << oat_filename;
size_t idx = 0;
@@ -1444,7 +1440,7 @@ class FixupRootVisitor : public RootVisitor {
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- *roots[i] = ImageAddress(*roots[i]);
+ *roots[i] = image_writer_->GetImageAddress(*roots[i]);
}
}
@@ -1452,19 +1448,12 @@ class FixupRootVisitor : public RootVisitor {
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+ roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
}
}
private:
ImageWriter* const image_writer_;
-
- mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- const size_t offset = image_writer_->GetImageOffset(obj);
- auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset);
- VLOG(compiler) << "Update root from " << obj << " to " << dest;
- return dest;
- }
};
void ImageWriter::CopyAndFixupNativeData() {
@@ -1536,54 +1525,48 @@ void ImageWriter::CopyAndFixupNativeData() {
}
FixupRootVisitor root_visitor(this);
- /* TODO: Reenable the intern table and class table
// Write the intern table into the image.
- const ImageSection& intern_table_section = image_header->GetImageSection(
- ImageHeader::kSectionInternedStrings);
- Runtime* const runtime = Runtime::Current();
- InternTable* const intern_table = runtime->GetInternTable();
- uint8_t* const intern_table_memory_ptr =
- image_info.image_->Begin() + intern_table_section.Offset();
- const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
- CHECK_EQ(intern_table_bytes, intern_table_bytes_);
- // Fixup the pointers in the newly written intern table to contain image addresses.
- InternTable temp_intern_table;
- // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
- // the VisitRoots() will update the memory directly rather than the copies.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- temp_intern_table.ReadFromMemory(intern_table_memory_ptr);
- CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
- temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
-
+ if (image_info.intern_table_bytes_ > 0) {
+ const ImageSection& intern_table_section = image_header->GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ InternTable* const intern_table = image_info.intern_table_.get();
+ uint8_t* const intern_table_memory_ptr =
+ image_info.image_->Begin() + intern_table_section.Offset();
+ const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
+ CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_intern_table;
+    // Note that we require that AddTableFromMemory does not make an internal copy of the elements so that
+ // the VisitRoots() will update the memory directly rather than the copies.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+ CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
+ temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+ }
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
- if (class_table_bytes_ > 0u) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ if (image_info.class_table_bytes_ > 0u) {
const ImageSection& class_table_section = image_header->GetImageSection(
ImageHeader::kSectionClassTable);
uint8_t* const class_table_memory_ptr =
image_info.image_->Begin() + class_table_section.Offset();
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- size_t class_table_bytes = 0;
- for (mirror::ClassLoader* loader : class_loaders_) {
- ClassTable* table = class_linker->ClassTableForClassLoader(loader);
- CHECK(table != nullptr);
- uint8_t* memory_ptr = class_table_memory_ptr + class_table_bytes;
- class_table_bytes += table->WriteToMemory(memory_ptr);
- // Fixup the pointers in the newly written class table to contain image addresses. See
- // above comment for intern tables.
- ClassTable temp_class_table;
- temp_class_table.ReadFromMemory(memory_ptr);
- CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
- table->NumZygoteClasses());
- BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
- RootInfo(kRootUnknown));
- temp_class_table.VisitRoots(buffered_visitor);
- }
- CHECK_EQ(class_table_bytes, class_table_bytes_);
+
+ ClassTable* table = image_info.class_table_.get();
+ CHECK(table != nullptr);
+ const size_t class_table_bytes = table->WriteToMemory(class_table_memory_ptr);
+ CHECK_EQ(class_table_bytes, image_info.class_table_bytes_);
+ // Fixup the pointers in the newly written class table to contain image addresses. See
+ // above comment for intern tables.
+ ClassTable temp_class_table;
+ temp_class_table.ReadFromMemory(class_table_memory_ptr);
+ CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
+ table->NumZygoteClasses());
+ BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
+ RootInfo(kRootUnknown));
+ temp_class_table.VisitRoots(buffered_visitor);
}
- */
}
void ImageWriter::CopyAndFixupObjects() {
@@ -1991,7 +1974,7 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
const char* oat_filename;
- if (orig->IsRuntimeMethod()) {
+ if (orig->IsRuntimeMethod() || compile_app_image_) {
oat_filename = default_oat_filename_;
} else {
auto it = dex_file_oat_filename_map_.find(orig->GetDexFile());
@@ -2110,7 +2093,6 @@ uint32_t ImageWriter::BinSlot::GetIndex() const {
}
uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
- // DCHECK_GT(intern_table_bytes_, 0u); TODO: Reenable intern table and class table.
uintptr_t last_image_end = 0;
for (const char* oat_fn : oat_filenames_) {
const ImageInfo& image_info = GetConstImageInfo(oat_fn);
@@ -2197,4 +2179,37 @@ void ImageWriter::UpdateOatFile(const char* oat_filename) {
}
}
+ImageWriter::ImageWriter(
+ const CompilerDriver& compiler_driver,
+ uintptr_t image_begin,
+ bool compile_pic,
+ bool compile_app_image,
+ ImageHeader::StorageMode image_storage_mode,
+ const std::vector<const char*> oat_filenames,
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
+ : compiler_driver_(compiler_driver),
+ global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
+ image_objects_offset_begin_(0),
+ oat_file_(nullptr),
+ compile_pic_(compile_pic),
+ compile_app_image_(compile_app_image),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+ image_method_array_(ImageHeader::kImageMethodsCount),
+ dirty_methods_(0u),
+ clean_methods_(0u),
+ image_storage_mode_(image_storage_mode),
+ dex_file_oat_filename_map_(dex_file_oat_filename_map),
+ oat_filenames_(oat_filenames),
+ default_oat_filename_(oat_filenames[0]) {
+ CHECK_NE(image_begin, 0U);
+ for (const char* oat_filename : oat_filenames) {
+ image_info_map_.emplace(oat_filename, ImageInfo());
+ }
+ std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
+}
+
+ImageWriter::ImageInfo::ImageInfo()
+ : intern_table_(new InternTable),
+ class_table_(new ClassTable) {}
+
} // namespace art
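
Editorial note: factoring the layout into ImageInfo::CreateImageSections means AllocMemory,
CalculateNewObjectOffsets, and CreateHeader can no longer disagree about the image size. The
layout itself is a running-offset scheme; a minimal sketch with illustrative names and a single
power-of-two alignment (the real code aligns each section differently):

    #include <cstddef>

    struct Section {
      size_t offset;
      size_t size;
      size_t End() const { return offset + size; }
    };

    // Lay out sections back to back, aligning each start; returns the total
    // size excluding any trailing bitmap, as CreateImageSections does.
    size_t LayoutSections(const size_t sizes[], size_t count,
                          size_t alignment, Section* out) {
      size_t cur = 0;
      for (size_t i = 0; i < count; ++i) {
        cur = (cur + alignment - 1) & ~(alignment - 1);  // RoundUp
        out[i] = Section{cur, sizes[i]};
        cur = out[i].End();
      }
      return cur;
    }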
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 78297ae645..ad690389e9 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -47,6 +47,8 @@ class ImageSpace;
} // namespace space
} // namespace gc
+class ClassTable;
+
static constexpr int kInvalidImageFd = -1;
// Write a Space built during compilation for use during execution.
@@ -58,33 +60,7 @@ class ImageWriter FINAL {
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*> oat_filenames,
- const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
- : compiler_driver_(compiler_driver),
- global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
- image_objects_offset_begin_(0),
- oat_file_(nullptr),
- compile_pic_(compile_pic),
- compile_app_image_(compile_app_image),
- boot_image_space_(nullptr),
- target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- intern_table_bytes_(0u),
- image_method_array_(ImageHeader::kImageMethodsCount),
- dirty_methods_(0u),
- clean_methods_(0u),
- class_table_bytes_(0u),
- image_storage_mode_(image_storage_mode),
- dex_file_oat_filename_map_(dex_file_oat_filename_map),
- oat_filenames_(oat_filenames),
- default_oat_filename_(oat_filenames[0]) {
- CHECK_NE(image_begin, 0U);
- for (const char* oat_filename : oat_filenames) {
- image_info_map_.emplace(oat_filename, ImageInfo());
- }
- std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
- }
-
- ~ImageWriter() {
- }
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map);
bool PrepareImageAddressSpace();
@@ -237,41 +213,40 @@ class ImageWriter FINAL {
};
struct ImageInfo {
- explicit ImageInfo()
- : image_begin_(nullptr),
- image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
- image_roots_address_(0),
- image_offset_(0),
- image_size_(0),
- oat_offset_(0),
- bin_slot_sizes_(),
- bin_slot_offsets_(),
- bin_slot_count_() {}
+ ImageInfo();
+ ImageInfo(ImageInfo&&) = default;
+
+    // Creates the image sections in the out_sections variable and returns the size of the
+    // image excluding the bitmap.
+ size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
  // Target begin of this image. Note: It is not valid to write here; this is the address
  // of the target image, not necessarily where image_ is mapped. The address is only valid
  // after layout (otherwise null).
- uint8_t* image_begin_;
+ uint8_t* image_begin_ = nullptr;
- size_t image_end_; // Offset to the free space in image_, initially size of image header.
- uint32_t image_roots_address_; // The image roots address in the image.
- size_t image_offset_; // Offset of this image from the start of the first image.
+ // Offset to the free space in image_, initially size of image header.
+ size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ uint32_t image_roots_address_ = 0; // The image roots address in the image.
+ size_t image_offset_ = 0; // Offset of this image from the start of the first image.
// Image size is the *address space* covered by this image. As the live bitmap is aligned
// to the page size, the live bitmap will cover more address space than necessary. But live
// bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
// The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
// page-aligned).
- size_t image_size_;
+ size_t image_size_ = 0;
// Oat data.
- size_t oat_offset_; // Offset of the oat file for this image from start of oat files. This is
- // valid when the previous oat file has been written.
- uint8_t* oat_data_begin_; // Start of oatdata in the corresponding oat file. This is
- // valid when the images have been layed out.
- size_t oat_size_; // Size of the corresponding oat data.
+ // Offset of the oat file for this image from start of oat files. This is
+ // valid when the previous oat file has been written.
+ size_t oat_offset_ = 0;
+ // Start of oatdata in the corresponding oat file. This is
+    // valid when the images have been laid out.
+ uint8_t* oat_data_begin_ = nullptr;
+ size_t oat_size_ = 0; // Size of the corresponding oat data.
// Image bitmap which lets us know where the objects inside of the image reside.
std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
@@ -280,12 +255,24 @@ class ImageWriter FINAL {
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
// Offset from oat_data_begin_ to the stubs.
- uint32_t oat_address_offsets_[kOatAddressCount];
+ uint32_t oat_address_offsets_[kOatAddressCount] = {};
// Bin slot tracking for dirty object packing.
- size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin.
- size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins.
- size_t bin_slot_count_[kBinSize]; // Number of objects in a bin.
+ size_t bin_slot_sizes_[kBinSize] = {}; // Number of bytes in a bin.
+ size_t bin_slot_offsets_[kBinSize] = {}; // Number of bytes in previous bins.
+ size_t bin_slot_count_[kBinSize] = {}; // Number of objects in a bin.
+
+ // Cached size of the intern table for when we allocate memory.
+ size_t intern_table_bytes_ = 0;
+
+ // Number of image class table bytes.
+ size_t class_table_bytes_ = 0;
+
+ // Intern table associated with this image for serialization.
+ std::unique_ptr<InternTable> intern_table_;
+
+ // Class table associated with this image for serialization.
+ std::unique_ptr<ClassTable> class_table_;
};
// We use the lock word to store the offset of the object in the image.
@@ -483,18 +470,12 @@ class ImageWriter FINAL {
const bool compile_pic_;
const bool compile_app_image_;
- // Cache the boot image space in this class for faster lookups.
- gc::space::ImageSpace* boot_image_space_;
-
// Size of pointers on the target architecture.
size_t target_ptr_size_;
// Mapping of oat filename to image data.
std::unordered_map<std::string, ImageInfo> image_info_map_;
- // Cached size of the intern table for when we allocate memory.
- size_t intern_table_bytes_;
-
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
@@ -528,9 +509,6 @@ class ImageWriter FINAL {
// null is a valid entry.
std::unordered_set<mirror::ClassLoader*> class_loaders_;
- // Number of image class table bytes.
- size_t class_table_bytes_;
-
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
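
Editorial note: the cached intern_table_bytes_ and class_table_bytes_ fields exist because both
tables use the same two-pass serialization protocol seen in the .cc changes: a null destination
measures, a real destination writes. A toy table demonstrating the protocol under that
assumption:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Toy stand-in for InternTable/ClassTable::WriteToMemory.
    struct ToyTable {
      uint32_t payload[4] = {1, 2, 3, 4};
      size_t WriteToMemory(uint8_t* ptr) const {
        if (ptr != nullptr) {
          std::memcpy(ptr, payload, sizeof(payload));  // pass 2: serialize
        }
        return sizeof(payload);                        // pass 1: measure
      }
    };

    int main() {
      ToyTable table;
      const size_t needed = table.WriteToMemory(nullptr);  // measure
      std::vector<uint8_t> section(needed);                // reserve
      assert(table.WriteToMemory(section.data()) == needed);
      return 0;
    }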
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b323d24038..bc51ed6e6a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -22,6 +22,7 @@
#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
+#include "base/unix_file/fd_file.h"
#include "compiler_callbacks.h"
#include "dex/pass_manager.h"
#include "dex/quick_compiler_callbacks.h"
@@ -42,11 +43,12 @@ JitCompiler* JitCompiler::Create() {
return new JitCompiler();
}
-extern "C" void* jit_load(CompilerCallbacks** callbacks) {
+extern "C" void* jit_load(CompilerCallbacks** callbacks, bool* generate_debug_info) {
VLOG(jit) << "loading jit compiler";
auto* const jit_compiler = JitCompiler::Create();
CHECK(jit_compiler != nullptr);
*callbacks = jit_compiler->GetCompilerCallbacks();
+ *generate_debug_info = jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo();
VLOG(jit) << "Done loading jit compiler";
return jit_compiler;
}
@@ -155,14 +157,33 @@ JitCompiler::JitCompiler() : total_time_(0) {
/* dump_cfg_append */ false,
cumulative_logger_.get(),
/* swap_fd */ -1,
- /* profile_file */ "",
- /* dex to oat map */ nullptr));
+ /* dex to oat map */ nullptr,
+ /* profile_compilation_info */ nullptr));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
+
+ if (compiler_options_->GetGenerateDebugInfo()) {
+#ifdef __ANDROID__
+ const char* prefix = GetAndroidData();
+#else
+ const char* prefix = "/tmp";
+#endif
+ DCHECK_EQ(compiler_driver_->GetThreadCount(), 1u)
+ << "Generating debug info only works with one compiler thread";
+ std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map";
+ perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
+ if (perf_file_ == nullptr) {
+ LOG(FATAL) << "Could not create perf file at " << perf_filename;
+ }
+ }
}
JitCompiler::~JitCompiler() {
+ if (perf_file_ != nullptr) {
+ UNUSED(perf_file_->Flush());
+ UNUSED(perf_file_->Close());
+ }
}
bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
@@ -188,6 +209,20 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile);
+ if (success && compiler_options_->GetGenerateDebugInfo()) {
+ const void* ptr = method_to_compile->GetEntryPointFromQuickCompiledCode();
+ std::ostringstream stream;
+ stream << std::hex
+ << reinterpret_cast<uintptr_t>(ptr)
+ << " "
+ << code_cache->GetMemorySizeOfCodePointer(ptr)
+ << " "
+ << PrettyMethod(method_to_compile)
+ << std::endl;
+ std::string str = stream.str();
+ bool res = perf_file_->WriteFully(str.c_str(), str.size());
+ CHECK(res);
+ }
}
// Trim maps to reduce memory usage.
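
Editorial note: the file written above follows the Linux perf /tmp/perf-<pid>.map convention:
one line per JIT-compiled method, with the code start address and code size both in hex,
followed by the pretty-printed method name. An illustrative line (the address and size are
made up):

    7f3a2c001000 1c0 java.lang.String java.lang.StringBuilder.toString()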
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 913a6d00ae..037a18ac7a 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -43,6 +43,9 @@ class JitCompiler {
size_t GetTotalCompileTime() const {
return total_time_;
}
+ CompilerOptions* GetCompilerOptions() const {
+ return compiler_options_.get();
+ }
private:
uint64_t total_time_;
@@ -53,6 +56,7 @@ class JitCompiler {
std::unique_ptr<CompilerCallbacks> callbacks_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+ std::unique_ptr<File> perf_file_;
JitCompiler();
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 877a674674..b10cc3534c 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -47,7 +47,7 @@ class RelativePatcherTest : public testing::Test {
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
false, nullptr, nullptr, nullptr, 1u,
- false, false, "", false, nullptr, -1, "", nullptr),
+ false, false, "", false, nullptr, -1, nullptr, nullptr),
error_msg_(),
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 58f46d69a2..9f7ffa5ace 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -121,7 +121,7 @@ class OatTest : public CommonCompilerTest {
false,
timer_.get(),
-1,
- "",
+ nullptr,
nullptr));
}
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index da2f9cbed5..eee6116098 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1142,7 +1142,7 @@ class BCEVisitor : public HGraphVisitor {
loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
- HoistToPreheaderOrDeoptBlock(loop, array_get);
+ HoistToPreHeaderOrDeoptBlock(loop, array_get);
}
}
}
@@ -1280,7 +1280,8 @@ class BCEVisitor : public HGraphVisitor {
// as runtime test. By restricting dynamic bce to unit strides (with a maximum of 32-bit
// iterations) and by not combining access (e.g. a[i], a[i-3], a[i+5] etc.), these tests
// correctly guard against any possible OOB (including arithmetic wrap-around cases).
- HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ HBasicBlock* block = GetPreHeader(loop, instruction);
induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
if (lower != nullptr) {
InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
@@ -1358,7 +1359,7 @@ class BCEVisitor : public HGraphVisitor {
return true;
} else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
- HoistToPreheaderOrDeoptBlock(loop, length);
+ HoistToPreHeaderOrDeoptBlock(loop, length);
return true;
}
}
@@ -1376,7 +1377,8 @@ class BCEVisitor : public HGraphVisitor {
HInstruction* array = check->InputAt(0);
if (loop->IsDefinedOutOfTheLoop(array)) {
// Generate: if (array == null) deoptimize;
- HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
InsertDeopt(loop, block, cond);
@@ -1423,6 +1425,28 @@ class BCEVisitor : public HGraphVisitor {
return true;
}
+ /**
+   * Returns the appropriate preheader for the loop, depending on whether the
+   * instruction appears in the loop header or in the loop body proper.
+ */
+ HBasicBlock* GetPreHeader(HLoopInformation* loop, HInstruction* instruction) {
+    // Use the preheader unless there is an earlier-generated deoptimization block, since
+    // hoisted expressions may depend on and/or be used by the deoptimization tests.
+ HBasicBlock* header = loop->GetHeader();
+ const uint32_t loop_id = header->GetBlockId();
+ auto it = taken_test_loop_.find(loop_id);
+ if (it != taken_test_loop_.end()) {
+ HBasicBlock* block = it->second;
+ // If always taken, keep it that way by returning the original preheader,
+ // which can be found by following the predecessor of the true-block twice.
+ if (instruction->GetBlock() == header) {
+ return block->GetSinglePredecessor()->GetSinglePredecessor();
+ }
+ return block;
+ }
+ return loop->GetPreHeader();
+ }
+
/** Inserts a deoptimization test. */
void InsertDeopt(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
HInstruction* suspend = loop->GetSuspendCheck();
@@ -1437,28 +1461,17 @@ class BCEVisitor : public HGraphVisitor {
}
/** Hoists instruction out of the loop to preheader or deoptimization block. */
- void HoistToPreheaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
- // Use preheader unless there is an earlier generated deoptimization block since
- // hoisted expressions may depend on and/or used by the deoptimization tests.
- const uint32_t loop_id = loop->GetHeader()->GetBlockId();
- HBasicBlock* preheader = loop->GetPreHeader();
- HBasicBlock* block = preheader;
- auto it = taken_test_loop_.find(loop_id);
- if (it != taken_test_loop_.end()) {
- block = it->second;
- }
- // Hoist the instruction.
+ void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
+ HBasicBlock* block = GetPreHeader(loop, instruction);
DCHECK(!instruction->HasEnvironment());
instruction->MoveBefore(block->GetLastInstruction());
}
/**
- * Adds a new taken-test structure to a loop if needed (and not already done).
+ * Adds a new taken-test structure to a loop if needed and not already done.
* The taken-test protects range analysis evaluation code to avoid any
* deoptimization caused by incorrect trip-count evaluation in non-taken loops.
*
- * Returns block in which deoptimizations/invariants can be put.
- *
* old_preheader
* |
* if_block <- taken-test protects deoptimization block
@@ -1490,16 +1503,11 @@ class BCEVisitor : public HGraphVisitor {
* array[i] = 0;
* }
*/
- HBasicBlock* TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
- // Not needed (can use preheader), or already done (can reuse)?
+ void TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
+ // Not needed (can use preheader) or already done (can reuse)?
const uint32_t loop_id = loop->GetHeader()->GetBlockId();
- if (!needs_taken_test) {
- return loop->GetPreHeader();
- } else {
- auto it = taken_test_loop_.find(loop_id);
- if (it != taken_test_loop_.end()) {
- return it->second;
- }
+ if (!needs_taken_test || taken_test_loop_.find(loop_id) != taken_test_loop_.end()) {
+ return;
}
// Generate top test structure.
@@ -1528,7 +1536,6 @@ class BCEVisitor : public HGraphVisitor {
if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
taken_test_loop_.Put(loop_id, true_block);
- return true_block;
}
/**
@@ -1543,7 +1550,7 @@ class BCEVisitor : public HGraphVisitor {
* \ /
* x_1 = phi(x_0, null) <- synthetic phi
* |
- * header
+ * new_preheader
*/
void InsertPhiNodes() {
// Scan all new deoptimization blocks.
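
Editorial note: one detail of GetPreHeader above is worth spelling out. Once the taken-test
structure exists, the block chain is true_block -> if_block -> old_preheader, so two
GetSinglePredecessor() hops recover the original preheader for instructions that live in the
loop header:

    // Sketch of the two-hop walk, using the names from the diff above.
    HBasicBlock* if_block = block->GetSinglePredecessor();          // the taken test
    HBasicBlock* old_preheader = if_block->GetSinglePredecessor();  // original preheader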
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 53d3615a41..ea0b9eca9a 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -997,6 +997,12 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
stack_map_stream_.EndStackMapEntry();
}
+bool CodeGenerator::HasStackMapAtCurrentPc() {
+ uint32_t pc = GetAssembler()->CodeSize();
+ size_t count = stack_map_stream_.GetNumberOfStackMaps();
+ return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
+}
+
void CodeGenerator::RecordCatchBlockInfo() {
ArenaAllocator* arena = graph_->GetArena();
@@ -1320,12 +1326,6 @@ void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCod
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
- // Control flow would not come back into the code if a fatal slow
- // path is taken, so we do not care if it triggers GC.
- slow_path->IsFatal() ||
- // HDeoptimize is a special case: we know we are not coming back from
- // it into the code.
- instruction->IsDeoptimize() ||
// When read barriers are enabled, some instructions use a
// slow path to emit a read barrier, which does not trigger
// GC, is not fatal, nor is emitted by HDeoptimize
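
Editorial note: HasStackMapAtCurrentPc gives backends a cheap way to avoid emitting two stack
maps for the same native PC. A hypothetical call site — the guard is the point; the surrounding
names are assumptions:

    if (!codegen->HasStackMapAtCurrentPc()) {
      // Only record if the previous instruction did not already map this PC.
      codegen->RecordPcInfo(instruction, instruction->GetDexPc());
    }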
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index eade05d7b6..5958cd89bc 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -269,6 +269,8 @@ class CodeGenerator {
// Record native to dex mapping for a suspend point. Required by runtime.
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
+  // Check whether we have already recorded a mapping at this PC.
+ bool HasStackMapAtCurrentPc();
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
@@ -611,7 +613,7 @@ class CodeGenerator {
ArenaVector<SlowPathCode*> slow_paths_;
- // The current slow path that we're generating code for.
+ // The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
// The current block index in `block_order_` of the block
@@ -672,6 +674,122 @@ class CallingConvention {
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
+/**
+ * A templated class SlowPathGenerator with a templated method NewSlowPath()
+ * that can be used by any code generator to share equivalent slow-paths with
+ * the objective of reducing generated code size.
+ *
+ * InstructionType: instruction that requires SlowPathCodeType
+ * SlowPathCodeType: subclass of SlowPathCode, with constructor SlowPathCodeType(InstructionType *)
+ */
+template <typename InstructionType>
+class SlowPathGenerator {
+ static_assert(std::is_base_of<HInstruction, InstructionType>::value,
+ "InstructionType is not a subclass of art::HInstruction");
+
+ public:
+ SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
+ : graph_(graph),
+ codegen_(codegen),
+ slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+
+  // Creates and adds a new slow-path, if needed, or returns an existing one otherwise.
+  // Templating the method (rather than the whole class) on the slow-path type enables
+  // keeping this code in a generic, non-architecture-specific place.
+ //
+ // NOTE: This approach assumes each InstructionType only generates one SlowPathCodeType.
+ // To relax this requirement, we would need some RTTI on the stored slow-paths,
+  //       or template the class as a whole on SlowPathCodeType.
+ template <typename SlowPathCodeType>
+ SlowPathCodeType* NewSlowPath(InstructionType* instruction) {
+ static_assert(std::is_base_of<SlowPathCode, SlowPathCodeType>::value,
+ "SlowPathCodeType is not a subclass of art::SlowPathCode");
+ static_assert(std::is_constructible<SlowPathCodeType, InstructionType*>::value,
+ "SlowPathCodeType is not constructible from InstructionType*");
+ // Iterate over potential candidates for sharing. Currently, only same-typed
+ // slow-paths with exactly the same dex-pc are viable candidates.
+ // TODO: pass dex-pc/slow-path-type to run-time to allow even more sharing?
+ const uint32_t dex_pc = instruction->GetDexPc();
+ auto iter = slow_path_map_.find(dex_pc);
+ if (iter != slow_path_map_.end()) {
+ auto candidates = iter->second;
+ for (const auto& it : candidates) {
+ InstructionType* other_instruction = it.first;
+ SlowPathCodeType* other_slow_path = down_cast<SlowPathCodeType*>(it.second);
+ // Determine if the instructions allow for slow-path sharing.
+ if (HaveSameLiveRegisters(instruction, other_instruction) &&
+ HaveSameStackMap(instruction, other_instruction)) {
+ // Can share: reuse existing one.
+ return other_slow_path;
+ }
+ }
+ } else {
+ // First time this dex-pc is seen.
+ iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ }
+ // Cannot share: create and add new slow-path for this particular dex-pc.
+ SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ iter->second.emplace_back(std::make_pair(instruction, slow_path));
+ codegen_->AddSlowPath(slow_path);
+ return slow_path;
+ }
+
+ private:
+  // Tests if both instructions have the same set of live physical registers. This ensures
+  // the slow-path has exactly the same preamble for saving these registers to the stack.
+ bool HaveSameLiveRegisters(const InstructionType* i1, const InstructionType* i2) const {
+ const uint32_t core_spill = ~codegen_->GetCoreSpillMask();
+ const uint32_t fpu_spill = ~codegen_->GetFpuSpillMask();
+ RegisterSet* live1 = i1->GetLocations()->GetLiveRegisters();
+ RegisterSet* live2 = i2->GetLocations()->GetLiveRegisters();
+ return (((live1->GetCoreRegisters() & core_spill) ==
+ (live2->GetCoreRegisters() & core_spill)) &&
+ ((live1->GetFloatingPointRegisters() & fpu_spill) ==
+ (live2->GetFloatingPointRegisters() & fpu_spill)));
+ }
+
+ // Tests if both instructions have the same stack map. This ensures the interpreter
+ // will find exactly the same dex-registers at the same entries.
+ bool HaveSameStackMap(const InstructionType* i1, const InstructionType* i2) const {
+ DCHECK(i1->HasEnvironment());
+ DCHECK(i2->HasEnvironment());
+    // We conservatively test whether the two environments record exactly the same instruction
+    // and location for each dex-register. This guarantees they will have the same stack map.
+ HEnvironment* e1 = i1->GetEnvironment();
+ HEnvironment* e2 = i2->GetEnvironment();
+ if (e1->GetParent() != e2->GetParent() || e1->Size() != e2->Size()) {
+ return false;
+ }
+ for (size_t i = 0, sz = e1->Size(); i < sz; ++i) {
+ if (e1->GetInstructionAt(i) != e2->GetInstructionAt(i) ||
+ !e1->GetLocationAt(i).Equals(e2->GetLocationAt(i))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ HGraph* const graph_;
+ CodeGenerator* const codegen_;
+
+ // Map from dex-pc to vector of already existing instruction/slow-path pairs.
+ ArenaSafeMap<uint32_t, ArenaVector<std::pair<InstructionType*, SlowPathCode*>>> slow_path_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathGenerator);
+};
+
+class InstructionCodeGenerator : public HGraphVisitor {
+ public:
+ InstructionCodeGenerator(HGraph* graph, CodeGenerator* codegen)
+ : HGraphVisitor(graph),
+ deopt_slow_paths_(graph, codegen) {}
+
+ protected:
+ // Add slow-path generator for each instruction/slow-path combination that desires sharing.
+  // TODO: under the current regime, only deopt sharing makes sense; extend later.
+ SlowPathGenerator<HDeoptimize> deopt_slow_paths_;
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
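
Editorial note: to make the sharing concrete, this is roughly how a backend visitor deriving
from InstructionCodeGenerator could request a deopt slow-path; NewSlowPath either reuses an
equivalent slow-path at the same dex-pc or creates a new one. The exact visitor signature and
the GenerateTestAndBranch helper are assumptions for illustration:

    void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
      // Shared when live registers and stack map match an earlier deopt at this dex-pc.
      SlowPathCode* slow_path =
          deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
      GenerateTestAndBranch(deoptimize,
                            /* condition_input_index */ 0,
                            slow_path->GetEntryLabel(),
                            /* false_target */ nullptr);
    }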
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9a1f2b8717..d64b8784e1 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -350,24 +350,24 @@ class TypeCheckSlowPathARM : public SlowPathCode {
class DeoptimizationSlowPathARM : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathARM(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
@@ -417,6 +417,56 @@ class ArraySetSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
};
+// Slow path marking an object during a read barrier.
+class ReadBarrierMarkSlowPathARM : public SlowPathCode {
+ public:
+ ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location out, Location obj)
+ : instruction_(instruction), out_(out), obj_(obj) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Register reg_out = out_.AsRegister<Register>();
+ DCHECK(locations->CanCall());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
+ DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsStaticFieldGet() ||
+ instruction_->IsArrayGet() ||
+ instruction_->IsLoadClass() ||
+ instruction_->IsLoadString() ||
+ instruction_->IsInstanceOf() ||
+ instruction_->IsCheckCast())
+ << "Unexpected instruction in read barrier marking slow path: "
+ << instruction_->DebugName();
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>();
+ arm_codegen->Move32(out_, Location::RegisterLocation(R0));
+
+ RestoreLiveRegisters(codegen, locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location out_;
+ const Location obj_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM);
+};
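+
+// For reference, the runtime entrypoint invoked above is assumed to reduce to
+// a single mark operation, consistent with the signature checked by
+// CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>;
+// a sketch:
+//
+//   extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
+//     // Returns the (possibly forwarded) to-space reference for `obj`.
+//     return ReadBarrier::Mark(obj);
+//   }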
+
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
public:
@@ -438,7 +488,7 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
// to be instrumented, e.g.:
//
// __ LoadFromOffset(kLoadWord, out, out, offset);
- // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset);
+ // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
//
// In that case, we have lost the information about the original
// object, and the emitted read barrier cannot work properly.
@@ -454,7 +504,9 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(!instruction_->IsInvoke() ||
(instruction_->IsInvokeStaticOrDirect() &&
- instruction_->GetLocations()->Intrinsified()));
+ instruction_->GetLocations()->Intrinsified()))
+ << "Unexpected instruction in read barrier for heap reference slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -596,14 +648,18 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
class ReadBarrierForRootSlowPathARM : public SlowPathCode {
public:
ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root)
- : instruction_(instruction), out_(out), root_(root) {}
+ : instruction_(instruction), out_(out), root_(root) {
+ DCHECK(kEmitCompilerReadBarrier);
+ }
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
- DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString());
+ DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
+ << "Unexpected instruction in read barrier for GC root slow path: "
+ << instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -857,7 +913,7 @@ void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1358,17 +1414,6 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, right);
- __ cmp(left, ShifterOperand(temp));
- }
-}
-
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
@@ -1434,7 +1479,7 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
- GenerateCompareWithImmediate(left_high, val_high);
+ __ CmpConstant(left_high, val_high);
if (if_cond == kCondNE) {
__ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
@@ -1444,7 +1489,7 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
__ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
- GenerateCompareWithImmediate(left_low, val_low);
+ __ CmpConstant(left_low, val_low);
} else {
Register right_high = right.AsRegisterPairHigh<Register>();
Register right_low = right.AsRegisterPairLow<Register>();
@@ -1568,7 +1613,7 @@ void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instructio
__ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
- GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
if (true_target == nullptr) {
__ b(false_target, ARMCondition(condition->GetOppositeCondition()));
@@ -1610,8 +1655,7 @@ void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathARM(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -1623,6 +1667,10 @@ void LocationsBuilderARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
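+
+// HasStackMapAtCurrentPc() is assumed to test whether the most recently
+// recorded stack map already sits at the assembler's current code offset;
+// a sketch:
+//
+//   bool CodeGenerator::HasStackMapAtCurrentPc() {
+//     uint32_t pc = GetAssembler()->CodeSize();
+//     size_t count = stack_map_stream_.GetNumberOfStackMaps();
+//     return count > 0 &&
+//            stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
+//   }
+//
+// The nop emitted above then guarantees that the native debug info stack map
+// gets a native PC distinct from the previous instruction's.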
@@ -1675,8 +1723,8 @@ void InstructionCodeGeneratorARM::HandleCondition(HCondition* cond) {
__ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
- GenerateCompareWithImmediate(left.AsRegister<Register>(),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ __ CmpConstant(left.AsRegister<Register>(),
+ CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
__ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
@@ -1891,7 +1939,7 @@ void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
}
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
- GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+ codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -2846,8 +2894,7 @@ void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instructi
Register dividend = locations->InAt(0).AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
- DCHECK(IsPowerOfTwo(abs_imm));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
int ctz_imm = CTZ(abs_imm);
if (ctz_imm == 1) {
@@ -2923,7 +2970,7 @@ void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperatio
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(std::abs(imm))) {
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
DivRemByPowerOfTwo(instruction);
} else {
DCHECK(imm <= -2 || imm >= 2);
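+
+// AbsOrMin is assumed to behave like std::abs except at the type's minimum
+// value, which it returns unchanged instead of triggering signed overflow;
+// a sketch:
+//
+//   template <typename T>
+//   static inline T AbsOrMin(T value) {
+//     return (value == std::numeric_limits<T>::min()) ? value : std::abs(value);
+//   }
+//
+// Cast to uint32_t, AbsOrMin(INT32_MIN) is 0x80000000u, which is a power of
+// two, so the power-of-two path above now also covers the most negative
+// divisor instead of hitting undefined behavior in std::abs.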
@@ -2952,12 +2999,12 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
- if (abs_imm <= 1) {
+ int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
+ if (value == 1 || value == 0 || value == -1) {
// No temp register required.
} else {
locations->AddTemp(Location::RequiresRegister());
- if (!IsPowerOfTwo(abs_imm)) {
+ if (!IsPowerOfTwo(AbsOrMin(value))) {
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -3078,12 +3125,12 @@ void LocationsBuilderARM::VisitRem(HRem* rem) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
- if (abs_imm <= 1) {
+ int32_t value = rem->InputAt(1)->AsIntConstant()->GetValue();
+ if (value == 1 || value == 0 || value == -1) {
// No temp register required.
} else {
locations->AddTemp(Location::RequiresRegister());
- if (!IsPowerOfTwo(abs_imm)) {
+ if (!IsPowerOfTwo(AbsOrMin(value))) {
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -3437,7 +3484,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
Register first_reg = first.AsRegister<Register>();
if (second.IsRegister()) {
Register second_reg = second.AsRegister<Register>();
- // Arm doesn't mask the shift count so we need to do it ourselves.
+ // ARM doesn't mask the shift count so we need to do it ourselves.
__ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
if (op->IsShl()) {
__ Lsl(out_reg, first_reg, out_reg);
@@ -3449,7 +3496,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
} else {
int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
- if (shift_value == 0) { // arm does not support shifting with 0 immediate.
+ if (shift_value == 0) { // ARM does not support shifting with 0 immediate.
__ Mov(out_reg, first_reg);
} else if (op->IsShl()) {
__ Lsl(out_reg, first_reg, shift_value);
@@ -3796,9 +3843,9 @@ void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
- // TODO (ported from quick): revisit Arm barrier kinds
- DmbOptions flavor = DmbOptions::ISH; // quiet c++ warnings
+void CodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
+ // TODO (ported from quick): revisit ARM barrier kinds.
+ DmbOptions flavor = DmbOptions::ISH; // Quiet C++ warnings.
switch (kind) {
case MemBarrierKind::kAnyStore:
case MemBarrierKind::kLoadAny:
@@ -3879,11 +3926,11 @@ void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldI
locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
locations->AddTemp(Location::RequiresRegister());
} else if (generate_volatile) {
- // Arm encoding have some additional constraints for ldrexd/strexd:
+ // The ARM encodings of ldrexd/strexd have some additional constraints:
// - registers need to be consecutive
// - the first register should be even but not R14.
- // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
- // enable Arm encoding.
+ // We don't test for ARM yet, and the assertion makes sure that we
+ // revisit this if we ever enable ARM encoding.
DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
locations->AddTemp(Location::RequiresRegister());
@@ -3913,7 +3960,7 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
}
switch (field_type) {
@@ -4005,7 +4052,7 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
}
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
}
@@ -4039,14 +4086,18 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI
(overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
}
if (volatile_for_double) {
- // Arm encoding have some additional constraints for ldrexd/strexd:
+ // The ARM encodings of ldrexd/strexd have some additional constraints:
// - registers need to be consecutive
// - the first register should be even but not R14.
- // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
- // enable Arm encoding.
+ // We don't test for ARM yet, and the assertion makes sure that we
+ // revisit this if we ever enable ARM encoding.
DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
}
}
@@ -4105,33 +4156,52 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
switch (field_type) {
- case Primitive::kPrimBoolean: {
+ case Primitive::kPrimBoolean:
__ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
break;
- }
- case Primitive::kPrimByte: {
+ case Primitive::kPrimByte:
__ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
break;
- }
- case Primitive::kPrimShort: {
+ case Primitive::kPrimShort:
__ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
break;
- }
- case Primitive::kPrimChar: {
+ case Primitive::kPrimChar:
__ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
break;
- }
case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
__ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
break;
+
+ case Primitive::kPrimNot: {
+ // /* HeapReference<Object> */ out = *(base + offset)
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp_loc = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier call.
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, base, offset, temp_loc, /* needs_null_check */ true);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ } else {
+ __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (is_volatile) {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset);
+ }
+ break;
}
- case Primitive::kPrimLong: {
+ case Primitive::kPrimLong:
if (is_volatile && !atomic_ldrd_strd) {
GenerateWideAtomicLoad(base, offset,
out.AsRegisterPairLow<Register>(),
@@ -4140,12 +4210,10 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
__ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
}
break;
- }
- case Primitive::kPrimFloat: {
+ case Primitive::kPrimFloat:
__ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
break;
- }
case Primitive::kPrimDouble: {
DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
@@ -4167,17 +4235,20 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
UNREACHABLE();
}
- // Doubles are handled in the switch.
- if (field_type != Primitive::kPrimDouble) {
+ if (field_type == Primitive::kPrimNot || field_type == Primitive::kPrimDouble) {
+ // Potential implicit null checks, in the case of reference or
+ // double fields, are handled in the previous switch statement.
+ } else {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (is_volatile) {
- GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
- }
-
- if (field_type == Primitive::kPrimNot) {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset);
+ if (field_type == Primitive::kPrimNot) {
+ // Memory barriers, in the case of references, are also handled
+ // in the previous switch statement.
+ } else {
+ codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
}
}
@@ -4340,6 +4411,11 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
Location::RequiresRegister(),
object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
+ // We need a temporary register for the read barrier marking slow
+ // path in CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier.
+ if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
@@ -4347,12 +4423,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Location index = locations->InAt(1);
- Primitive::Type type = instruction->GetType();
+ Location out_loc = locations->Out();
+ Primitive::Type type = instruction->GetType();
switch (type) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -4366,7 +4443,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
@@ -4380,7 +4457,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
@@ -4394,7 +4471,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
@@ -4406,13 +4483,9 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
break;
}
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
+ case Primitive::kPrimInt: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsRegister<Register>();
+ Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
@@ -4424,44 +4497,79 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case Primitive::kPrimNot: {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ // /* HeapReference<Object> */ out =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ // Note that a potential implicit null check is handled in this
+ // CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier call.
+ codegen_->GenerateArrayLoadWithBakerReadBarrier(
+ instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ true);
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ LoadFromOffset(kLoadWord, out, IP, data_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
+ codegen_->MaybeGenerateReadBarrierSlow(
+ instruction, out_loc, out_loc, obj_loc, data_offset, index);
+ }
+ }
+ break;
+ }
+
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Location out = locations->Out();
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
+ __ LoadFromOffset(kLoadWordPair, out_loc.AsRegisterPairLow<Register>(), obj, offset);
} else {
__ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
+ __ LoadFromOffset(kLoadWordPair, out_loc.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
}
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- Location out = locations->Out();
- DCHECK(out.IsFpuRegister());
+ SRegister out = out_loc.AsFpuRegister<SRegister>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
+ __ LoadSFromOffset(out, obj, offset);
} else {
__ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
- __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
+ __ LoadSFromOffset(out, IP, data_offset);
}
break;
}
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- Location out = locations->Out();
- DCHECK(out.IsFpuRegisterPair());
+ SRegister out = out_loc.AsFpuRegisterPairLow<SRegister>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
+ __ LoadDFromOffset(FromLowSToD(out), obj, offset);
} else {
__ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
- __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
+ __ LoadDFromOffset(FromLowSToD(out), IP, data_offset);
}
break;
}
@@ -4470,20 +4578,12 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
if (type == Primitive::kPrimNot) {
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Location out = locations->Out();
- if (index.IsConstant()) {
- uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset);
- } else {
- codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index);
- }
+ // Potential implicit null checks, in the case of reference
+ // arrays, are handled in the previous switch statement.
+ } else {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -4574,6 +4674,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
__ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, source, IP, data_offset);
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
DCHECK(!needs_write_barrier);
DCHECK(!may_need_runtime_call_for_type_check);
break;
@@ -4615,12 +4716,12 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
// __ Mov(temp2, temp1);
// // /* HeapReference<Class> */ temp1 = temp1->component_type_
// __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp1_loc, temp1_loc, temp2_loc, component_offset);
//
// // /* HeapReference<Class> */ temp2 = value->klass_
// __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // codegen_->GenerateReadBarrier(
+ // codegen_->GenerateReadBarrierSlow(
// instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc);
//
// __ cmp(temp1, ShifterOperand(temp2));
@@ -4717,8 +4818,6 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
__ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, value, IP, data_offset);
}
-
- codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -4770,8 +4869,8 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
UNREACHABLE();
}
- // Ints and objects are handled in the switch.
- if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ // Objects are handled in the switch.
+ if (value_type != Primitive::kPrimNot) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -5140,16 +5239,9 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ AddConstant(out, current_method, declaring_class_offset);
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ LoadFromOffset(kLoadWord, out, current_method, declaring_class_offset);
- }
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
} else {
// /* GcRoot<mirror::Class>[] */ out =
// current_method.ptr_sized_fields_->dex_cache_resolved_types_
@@ -5157,17 +5249,8 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
out,
current_method,
ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &out[type_index]
- __ AddConstant(out, out, cache_offset);
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- __ LoadFromOffset(kLoadWord, out, out, cache_offset);
- }
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ GenerateGcRootFieldLoad(cls, out_loc, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
@@ -5230,30 +5313,14 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
Register out = out_loc.AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
- uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value();
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_)
- __ AddConstant(out, current_method, declaring_class_offset);
- // /* mirror::Class* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
- __ LoadFromOffset(kLoadWord, out, current_method, declaring_class_offset);
- }
-
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(
+ load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
// /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
-
- size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex());
- if (kEmitCompilerReadBarrier) {
- // /* GcRoot<mirror::String>* */ out = &out[string_index]
- __ AddConstant(out, out, cache_offset);
- // /* mirror::String* */ out = out->Read()
- codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc);
- } else {
- // /* GcRoot<mirror::String> */ out = out[string_index]
- __ LoadFromOffset(kLoadWord, out, out, cache_offset);
- }
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ GenerateGcRootFieldLoad(
+ load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
if (!load->IsInDexCache()) {
SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
@@ -5300,6 +5367,14 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
+static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+ return kEmitCompilerReadBarrier &&
+ (kUseBakerReadBarrier ||
+ type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+ type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+ type_check_kind == TypeCheckKind::kArrayObjectCheck);
+}
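+
+// Note: with Baker read barriers, every reference load in VisitInstanceOf and
+// VisitCheckCast below goes through the GenerateReferenceLoad{One,Two}Registers
+// helpers, which need a scratch register for the marking slow path; hence the
+// unconditional kUseBakerReadBarrier disjunct in addition to the three check
+// kinds that already required a temporary for slow-path-based read barriers.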
+
void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
@@ -5326,21 +5401,22 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
// When read barriers are enabled, we need a temporary register for
// some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
+ Location temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(0) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5355,10 +5431,9 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
}
// /* HeapReference<Class> */ out = obj->klass_
- __ LoadFromOffset(kLoadWord, out, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, temp_loc);
- switch (instruction->GetTypeCheckKind()) {
+ switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
__ cmp(out, ShifterOperand(cls));
// Classes must be equal for the instanceof to succeed.
@@ -5373,17 +5448,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
// object to avoid doing a comparison we know will fail.
Label loop;
__ Bind(&loop);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ Mov(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadWord, out, out, super_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ cmp(out, ShifterOperand(cls));
@@ -5401,17 +5467,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
__ Bind(&loop);
__ cmp(out, ShifterOperand(cls));
__ b(&success, EQ);
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ Mov(temp, out);
- }
// /* HeapReference<Class> */ out = out->super_class_
- __ LoadFromOffset(kLoadWord, out, out, super_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc);
__ CompareAndBranchIfNonZero(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ b(&done);
@@ -5429,17 +5486,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
__ cmp(out, ShifterOperand(cls));
__ b(&exact_check, EQ);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `out` into `temp` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp = temp_loc.AsRegister<Register>();
- __ Mov(temp, out);
- }
// /* HeapReference<Class> */ out = out->component_type_
- __ LoadFromOffset(kLoadWord, out, out, component_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, temp_loc);
// If `out` is null, we use it for the result, and jump to `done`.
__ CompareAndBranchIfZero(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -5478,6 +5526,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
// HInstanceOf instruction (following the runtime calling
// convention), which might be cluttered by the potential first
// read barrier emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
/* is_fatal */ false);
@@ -5532,27 +5587,27 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
locations->AddTemp(Location::RequiresRegister());
// When read barriers are enabled, we need an additional temporary
// register for some cases.
- if (kEmitCompilerReadBarrier &&
- (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+ if (TypeCheckNeedsATemporary(type_check_kind)) {
locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
LocationSummary* locations = instruction->GetLocations();
Location obj_loc = locations->InAt(0);
Register obj = obj_loc.AsRegister<Register>();
Register cls = locations->InAt(1).AsRegister<Register>();
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
+ Location temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+ locations->GetTemp(1) :
+ Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
bool is_type_check_slow_path_fatal =
(type_check_kind == TypeCheckKind::kExactCheck ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
@@ -5571,8 +5626,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
}
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
@@ -5589,18 +5643,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// object to avoid doing a comparison we know will fail.
Label loop, compare_classes;
__ Bind(&loop);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ Mov(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// to the `compare_classes` label to compare it with the checked
@@ -5612,8 +5656,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ b(type_check_slow_path->GetEntryLabel());
__ Bind(&compare_classes);
@@ -5629,18 +5672,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
__ cmp(temp, ShifterOperand(cls));
__ b(&done, EQ);
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ Mov(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->super_class_
- __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc);
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
@@ -5651,8 +5684,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ b(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5664,19 +5696,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
__ b(&done, EQ);
// Otherwise, we need to check that the object's class is a non-primitive array.
- Location temp2_loc =
- kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation();
- if (kEmitCompilerReadBarrier) {
- // Save the value of `temp` into `temp2` before overwriting it
- // in the following move operation, as we will need it for the
- // read barrier below.
- Register temp2 = temp2_loc.AsRegister<Register>();
- __ Mov(temp2, temp);
- }
// /* HeapReference<Class> */ temp = temp->component_type_
- __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
- codegen_->MaybeGenerateReadBarrier(
- instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, temp2_loc);
// If the component type is not null (i.e. the object is indeed
// an array), jump to label `check_non_primitive_component_type`
@@ -5689,8 +5710,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// going into the slow path, as it has been overwritten in the
// meantime.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ b(type_check_slow_path->GetEntryLabel());
__ Bind(&check_non_primitive_component_type);
@@ -5699,8 +5719,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
__ CompareAndBranchIfZero(temp, &done);
// Same comment as above regarding `temp` and the slow path.
// /* HeapReference<Class> */ temp = obj->klass_
- __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
- codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset);
+ GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc);
__ b(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5717,6 +5736,13 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
+ //
+ // TODO: Introduce a new runtime entry point taking the object
+ // to test (instead of its class) as argument, and let it deal
+ // with the read barrier issues. This will let us refactor this
+ // case of the `switch` code as it was previously (with a direct
+ // call to the runtime not using a type checking slow path).
+ // This should also be beneficial for the other cases above.
__ b(type_check_slow_path->GetEntryLabel());
break;
}
@@ -5901,14 +5927,249 @@ void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instr
}
}
-void CodeGeneratorARM::GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp) {
+ Register out_reg = out.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, out_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // Save the value of `out` into `temp` before overwriting it
+ // in the following move operation, as we will need it for the
+ // read barrier below.
+ __ Mov(temp.AsRegister<Register>(), out_reg);
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, temp, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(out + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp) {
+ Register out_reg = out.AsRegister<Register>();
+ Register obj_reg = obj.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Load with fast path based Baker's read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, out, obj_reg, offset, temp, /* needs_null_check */ false);
+ } else {
+ // Load with slow path based read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
+ codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
+ }
+ } else {
+ // Plain load with no read barrier.
+ // /* HeapReference<Object> */ out = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
+ __ MaybeUnpoisonHeapReference(out_reg);
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset) {
+ Register root_reg = root.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ // Fast path implementation of art::ReadBarrier::BarrierForRoot when
+ // Baker's read barriers are used:
+ //
+ // root = obj.field;
+ // if (Thread::Current()->GetIsGcMarking()) {
+ // root = ReadBarrier::Mark(root)
+ // }
+
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
+ static_assert(
+ sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+ "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+ "have different sizes.");
+ static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::CompressedReference<mirror::Object> and int32_t "
+ "have different sizes.");
+
+ // Slow path used to mark the GC root `root`.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, root, root);
+ codegen_->AddSlowPath(slow_path);
+
+ __ LoadFromOffset(
+ kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmWordSize>().Int32Value());
+ __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ } else {
+ // GC root loaded through a slow path for read barriers other
+ // than Baker's.
+ // /* GcRoot<mirror::Object>* */ root = obj + offset
+ __ AddConstant(root_reg, obj, offset);
+ // /* mirror::Object* */ root = root->Read()
+ codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+ }
+ } else {
+ // Plain GC root load with no read barrier.
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
+ // Note that GC roots are not affected by heap poisoning, thus we
+ // do not have to unpoison `root_reg` here.
+ }
+}
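+
+// Note: unlike the field/array Baker fast path below, which inspects the
+// object's lock word, the root fast path keys off the thread-local
+// is_gc_marking flag (loaded via TR above), since a GC root is not embedded
+// in an object and thus has no monitor word to consult.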
+
+void CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ Location no_index = Location::NoLocation();
+ GenerateReferenceLoadWithBakerReadBarrier(
+ instruction, ref, obj, offset, no_index, temp, needs_null_check);
+}
+
+void CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // /* HeapReference<Object> */ ref =
+ // *(obj + data_offset + index * sizeof(HeapReference<Object>))
+ GenerateReferenceLoadWithBakerReadBarrier(
+ instruction, ref, obj, data_offset, index, temp, needs_null_check);
+}
+
+void CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location index,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // In slow path based read barriers, the read barrier call is
+ // inserted after the original load. However, in fast path based
+ // Baker's read barriers, we need to perform the load of
+ // mirror::Object::monitor_ *before* the original reference load.
+ // This load-load ordering is required by the read barrier.
+ // The fast path/slow path (for Baker's algorithm) should look like:
+ //
+ // uint32_t rb_state = LockWord(obj->monitor_).ReadBarrierState();
+ // lfence; // Load fence or artificial data dependency to prevent load-load reordering
+ // HeapReference<Object> ref = *src; // Original reference load.
+ // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // if (is_gray) {
+ // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
+ // }
+ //
+ // Note: the original implementation in ReadBarrier::Barrier is
+ // slightly more complex as:
+ // - it implements the load-load fence using a data dependency on
+ // the high-bits of rb_state, which are expected to be all zeroes;
+ // - it performs additional checks that we do not do here for
+ // performance reasons.
+
+ Register ref_reg = ref.AsRegister<Register>();
+ Register temp_reg = temp.AsRegister<Register>();
+ uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+ // /* int32_t */ monitor = obj->monitor_
+ __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // /* LockWord */ lock_word = LockWord(monitor)
+ static_assert(sizeof(LockWord) == sizeof(int32_t),
+ "art::LockWord and int32_t have different sizes.");
+ // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
+ __ Lsr(temp_reg, temp_reg, LockWord::kReadBarrierStateShift);
+ __ and_(temp_reg, temp_reg, ShifterOperand(LockWord::kReadBarrierStateMask));
+ static_assert(
+ LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
+ "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
+
+ // Introduce a dependency on the high bits of rb_state, which shall
+ // be all zeroes, to prevent load-load reordering without using
+ // a memory barrier (which would be more expensive).
+ // IP = rb_state & ~LockWord::kReadBarrierStateMask = 0
+ __ bic(IP, temp_reg, ShifterOperand(LockWord::kReadBarrierStateMask));
+ // obj is unchanged by this operation, but its value now depends on
+ // IP, which depends on temp_reg.
+ __ add(obj, obj, ShifterOperand(IP));
+
+ // The actual reference load.
+ if (index.IsValid()) {
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ // /* HeapReference<Object> */ ref =
+ // *(obj + offset + index * sizeof(HeapReference<Object>))
+ if (index.IsConstant()) {
+ size_t computed_offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset;
+ __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ LoadFromOffset(kLoadWord, ref_reg, IP, offset);
+ }
+ } else {
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
+ }
+
+ // Object* ref = ref_addr->AsMirrorPtr()
+ __ MaybeUnpoisonHeapReference(ref_reg);
+
+ // Slow path used to mark the object `ref` when it is gray.
+ SlowPathCode* slow_path =
+ new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref, ref);
+ AddSlowPath(slow_path);
+
+ // if (rb_state == ReadBarrier::gray_ptr_)
+ // ref = ReadBarrier::Mark(ref);
+ __ cmp(temp_reg, ShifterOperand(ReadBarrier::gray_ptr_));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ __ Bind(slow_path->GetExitLabel());
+}
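+
+// For comparison, a more conservative ordering of the two loads above would
+// use an explicit barrier instead of the bic/add data dependency; a sketch
+// (offset-addressed case only):
+//
+//   // /* int32_t */ monitor = obj->monitor_
+//   __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
+//   __ dmb(DmbOptions::ISH);  // Load-load fence ordering the two loads.
+//   // /* HeapReference<Object> */ ref = *(obj + offset)
+//   __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
+//
+// The dependency-based version keeps the fast path free of dmb instructions,
+// at the cost of the bic/add pair.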
+
+void CodeGeneratorARM::GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the reference load.
+ //
// If heap poisoning is enabled, the unpoisoning of the loaded
// reference will be carried out by the runtime within the slow
// path.
@@ -5922,57 +6183,41 @@ void CodeGeneratorARM::GenerateReadBarrier(HInstruction* instruction,
ReadBarrierForHeapReferenceSlowPathARM(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
- // TODO: When read barrier has a fast path, add it here.
- /* Currently the read barrier call is inserted after the original load.
- * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the
- * original load. This load-load ordering is required by the read barrier.
- * The fast path/slow path (for Baker's algorithm) should look like:
- *
- * bool isGray = obj.LockWord & kReadBarrierMask;
- * lfence; // load fence or artificial data dependence to prevent load-load reordering
- * ref = obj.field; // this is the original load
- * if (isGray) {
- * ref = Mark(ref); // ideally the slow path just does Mark(ref)
- * }
- */
-
__ b(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
-void CodeGeneratorARM::MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index) {
+void CodeGeneratorARM::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index) {
if (kEmitCompilerReadBarrier) {
+ // Baker's read barriers shall be handled by the fast path
+ // (CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier).
+ DCHECK(!kUseBakerReadBarrier);
// If heap poisoning is enabled, unpoisoning will be taken care of
// by the runtime within the slow path.
- GenerateReadBarrier(instruction, out, ref, obj, offset, index);
+ GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
} else if (kPoisonHeapReferences) {
__ UnpoisonHeapReference(out.AsRegister<Register>());
}
}
-void CodeGeneratorARM::GenerateReadBarrierForRoot(HInstruction* instruction,
- Location out,
- Location root) {
+void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction,
+ Location out,
+ Location root) {
DCHECK(kEmitCompilerReadBarrier);
+ // Insert a slow path based read barrier *after* the GC root load.
+ //
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special here.
SlowPathCode* slow_path =
new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM(instruction, out, root);
AddSlowPath(slow_path);
- // TODO: Implement a fast path for ReadBarrierForRoot, performing
- // the following operation (for Baker's algorithm):
- //
- // if (thread.tls32_.is_gc_marking) {
- // root = Mark(root);
- // }
-
__ b(slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
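
The TODO removed above described the root fast path that this change now implements elsewhere (see GenerateGcRootFieldLoad in the header below). As a hedged C++ sketch of that one-flag test, with Mark standing in for ReadBarrier::Mark:

void* Mark(void* root);  // stands in for ReadBarrier::Mark()

// Conceptual GC-root fast path for Baker's algorithm: the root only needs
// marking while the GC is in its marking phase.
inline void* ReadBarrierForRootFast(void* root, bool is_gc_marking) {
  return is_gc_marking ? Mark(root) : root;
}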
@@ -6304,7 +6549,7 @@ void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
if (num_entries - last_index == 2) {
// The last missing case_value.
- GenerateCompareWithImmediate(temp_reg, 1);
+ __ CmpConstant(temp_reg, 1);
__ b(codegen_->GetLabelOf(successors[last_index + 1]), EQ);
}
@@ -6364,7 +6609,7 @@ void InstructionCodeGeneratorARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysB
void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
if (!trg.IsValid()) {
- DCHECK(type == Primitive::kPrimVoid);
+ DCHECK_EQ(type, Primitive::kPrimVoid);
return;
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b7c58e1248..26d6d63b31 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -188,7 +188,7 @@ class LocationsBuilderARM : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
-class InstructionCodeGeneratorARM : public HGraphVisitor {
+class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
@@ -222,24 +222,57 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
void HandleLongRotate(LocationSummary* locations);
void HandleRotate(HRor* ror);
void HandleShift(HBinaryOperation* operation);
- void GenerateMemoryBarrier(MemBarrierKind kind);
+
void GenerateWideAtomicStore(Register addr, uint32_t offset,
Register value_lo, Register value_hi,
Register temp1, Register temp2,
HInstruction* instruction);
void GenerateWideAtomicLoad(Register addr, uint32_t offset,
Register out_lo, Register out_hi);
+
void HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ // Generate a heap reference load using one register `out`:
+ //
+ // out <- *(out + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a read barrier.
+ void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+ Location out,
+ uint32_t offset,
+ Location temp);
+ // Generate a heap reference load using two different registers
+ // `out` and `obj`:
+ //
+ // out <- *(obj + offset)
+ //
+ // while honoring heap poisoning and/or read barriers (if any).
+ // Register `temp` is used when generating a Baker's read barrier.
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+ Location out,
+ Location obj,
+ uint32_t offset,
+ Location temp);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers (if any).
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset);
+
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Label* true_target,
Label* false_target);
- void GenerateCompareWithImmediate(Register left, int32_t right);
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
@@ -346,6 +379,8 @@ class CodeGeneratorARM : public CodeGenerator {
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
Label* GetLabelOf(HBasicBlock* block) const {
return CommonGetLabelOf<Label>(block_labels_, block);
}
@@ -406,7 +441,26 @@ class CodeGeneratorARM : public CodeGenerator {
return &it->second;
}
- // Generate a read barrier for a heap reference within `instruction`.
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference array load when Baker's read barriers are used.
+ void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location out,
+ Register obj,
+ uint32_t data_offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
+ // Generate a read barrier for a heap reference within `instruction`
+ // using a slow path.
//
// A read barrier for an object reference read from the heap is
// implemented as a call to the artReadBarrierSlow runtime entry
@@ -423,23 +477,25 @@ class CodeGeneratorARM : public CodeGenerator {
// When `index` is provided (i.e. for array accesses), the offset
// value passed to artReadBarrierSlow is adjusted to take `index`
// into account.
- void GenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
-
- // If read barriers are enabled, generate a read barrier for a heap reference.
- // If heap poisoning is enabled, also unpoison the reference in `out`.
- void MaybeGenerateReadBarrier(HInstruction* instruction,
- Location out,
- Location ref,
- Location obj,
- uint32_t offset,
- Location index = Location::NoLocation());
-
- // Generate a read barrier for a GC root within `instruction`.
+ void GenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // If read barriers are enabled, generate a read barrier for a heap
+ // reference using a slow path. If heap poisoning is enabled, also
+ // unpoison the reference in `out`.
+ void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
+ Location out,
+ Location ref,
+ Location obj,
+ uint32_t offset,
+ Location index = Location::NoLocation());
+
+ // Generate a read barrier for a GC root within `instruction` using
+ // a slow path.
//
// A read barrier for an object reference GC root is implemented as
// a call to the artReadBarrierForRootSlow runtime entry point,
@@ -449,9 +505,19 @@ class CodeGeneratorARM : public CodeGenerator {
//
// The `out` location contains the value returned by
// artReadBarrierForRootSlow.
- void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
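
As the comment notes, for array accesses the slow path receives a single flattened offset rather than a separate (offset, index) pair. A sketch of the assumed adjustment; the 4-byte element size matches the static_assert on sizeof(mirror::HeapReference<mirror::Object>) earlier in this change:

#include <cstdint>

// Assumed flattening of (offset, index) into the offset passed to
// artReadBarrierSlow for array accesses.
inline uint32_t AdjustedOffset(uint32_t offset, uint32_t index) {
  return offset + index * 4u;  // sizeof(mirror::HeapReference<mirror::Object>) == 4
}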
private:
+ // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
+ // and GenerateArrayLoadWithBakerReadBarrier.
+ void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location index,
+ Location temp,
+ bool needs_null_check);
+
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b49f42b6c8..a3150d3d22 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -477,24 +477,24 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
public:
- explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};
@@ -1605,7 +1605,7 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -2534,8 +2534,7 @@ void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruc
Register out = OutputRegister(instruction);
Register dividend = InputRegisterAt(instruction, 0);
int64_t imm = Int64FromConstant(second.GetConstant());
- uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
- DCHECK(IsPowerOfTwo(abs_imm));
+ uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
int ctz_imm = CTZ(abs_imm);
UseScratchRegisterScope temps(GetVIXLAssembler());
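
The repeated std::abs(imm) -> AbsOrMin(imm) substitution in these hunks matters because std::abs on the most negative integer is undefined behavior. A sketch of what AbsOrMin is assumed to do (the real helper lives in ART's utility headers):

#include <limits>

// Assumed behavior: |value|, except that the most negative value is returned
// unchanged; its magnitude (2^31 or 2^63) is still a power of two, which is
// all the power-of-two div/rem paths need.
template <typename T>
T AbsOrMin(T value) {
  return (value == std::numeric_limits<T>::min())
      ? value
      : (value < 0 ? -value : value);
}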
@@ -2627,7 +2626,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* ins
// Do not generate anything. DivZeroCheck would prevent any code from being executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(std::abs(imm))) {
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
DivRemByPowerOfTwo(instruction);
} else {
DCHECK(imm <= -2 || imm >= 2);
@@ -2940,9 +2939,8 @@ void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathARM64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeARM64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
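
The deopt_slow_paths_.NewSlowPath<...>() calls in these hunks replace the open-coded allocate-and-register pattern being removed. A sketch of the assumed helper shape, written as a hypothetical free function (the real member presumably lives on a slow-path generator owned by the instruction code generator and may additionally share one slow path among equivalent instructions):

template <typename SlowPathT, typename InstructionT>
SlowPathT* NewSlowPath(InstructionT* instruction, HGraph* graph, CodeGenerator* codegen) {
  SlowPathT* path = new (graph->GetArena()) SlowPathT(instruction);  // arena-allocated
  codegen->AddSlowPath(path);                                        // queued for emission
  return path;
}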
@@ -2954,6 +2952,10 @@ void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ Nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 0e90ac6345..f2ff89488e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -186,7 +186,7 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};
-class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 4648606da8..322912976e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -444,19 +444,16 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
instruction_,
- dex_pc,
+ instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickDeoptimize));
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
@@ -465,7 +462,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};
@@ -608,9 +605,9 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
// then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
// unpredictable and the following mfhc1 will fail.
__ Mfc1(TMP, f1);
- __ Mfhc1(AT, f1);
+ __ MoveFromFpuHigh(AT, f1);
__ Mtc1(r2_l, f1);
- __ Mthc1(r2_h, f1);
+ __ MoveToFpuHigh(r2_h, f1);
__ Move(r2_l, TMP);
__ Move(r2_h, AT);
} else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
@@ -862,7 +859,7 @@ void CodeGeneratorMIPS::Move64(Location destination, Location source) {
Register dst_low = destination.AsRegisterPairLow<Register>();
FRegister src = source.AsFpuRegister<FRegister>();
__ Mfc1(dst_low, src);
- __ Mfhc1(dst_high, src);
+ __ MoveFromFpuHigh(dst_high, src);
} else {
DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
int32_t off = source.GetStackIndex();
@@ -875,7 +872,7 @@ void CodeGeneratorMIPS::Move64(Location destination, Location source) {
Register src_high = source.AsRegisterPairHigh<Register>();
Register src_low = source.AsRegisterPairLow<Register>();
__ Mtc1(src_low, dst);
- __ Mthc1(src_high, dst);
+ __ MoveToFpuHigh(src_high, dst);
} else if (source.IsFpuRegister()) {
__ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
} else {
@@ -1241,7 +1238,7 @@ void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instructi
InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
CodeGeneratorMIPS* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1511,7 +1508,7 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction)
}
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1534,7 +1531,7 @@ void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1542,30 +1539,58 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
bool use_imm = rhs_location.IsConstant();
Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
- uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
- uint32_t shift_value = rhs_imm & shift_mask;
- // Is the INS (Insert Bit Field) instruction supported?
- bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+ const uint32_t shift_mask = (type == Primitive::kPrimInt)
+ ? kMaxIntShiftValue
+ : kMaxLongShiftValue;
+ const uint32_t shift_value = rhs_imm & shift_mask;
+ // Are the INS (Insert Bit Field) and ROTR instructions supported?
+ bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
switch (type) {
case Primitive::kPrimInt: {
Register dst = locations->Out().AsRegister<Register>();
Register lhs = locations->InAt(0).AsRegister<Register>();
if (use_imm) {
- if (instr->IsShl()) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotr(dst, lhs, shift_value);
+ } else {
+ __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
+ __ Srl(dst, lhs, shift_value);
+ __ Or(dst, dst, TMP);
+ }
}
} else {
if (instr->IsShl()) {
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotrv(dst, lhs, rhs_reg);
+ } else {
+ __ Subu(TMP, ZERO, rhs_reg);
+ // 32-bit shift instructions use the 5 least significant bits of the shift count, so
+ // shifting by `-rhs_reg` is equivalent to shifting by `(32 - rhs_reg) & 31`. The case
+ // when `rhs_reg & 31 == 0` is OK even though we don't shift `lhs` left all the way out
+ // by 32, because the result in this case is computed as `(lhs >> 0) | (lhs << 0)`,
+ // IOW, the OR'd values are equal.
+ __ Sllv(TMP, lhs, TMP);
+ __ Srlv(dst, lhs, rhs_reg);
+ __ Or(dst, dst, TMP);
+ }
}
}
break;
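
Per the comment above, the Subu/Sllv/Srlv/Or sequence emulates a 32-bit rotate right when ROTR is unavailable. The same computation in C++, masking shift counts to their low 5 bits the way the MIPS shift instructions do (which also keeps the shifts well defined):

#include <cstdint>

// 32-bit rotate right by a variable amount; rhs % 32 == 0 yields lhs
// unchanged because both halves of the OR equal lhs.
inline uint32_t RotateRight32(uint32_t lhs, uint32_t rhs) {
  uint32_t left = lhs << ((32u - rhs) & 31u);  // Sllv by TMP = -rhs
  uint32_t right = lhs >> (rhs & 31u);         // Srlv
  return left | right;                         // Or
}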
@@ -1580,7 +1605,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
if (shift_value == 0) {
codegen_->Move64(locations->Out(), locations->InAt(0));
} else if (shift_value < kMipsBitsPerWord) {
- if (has_ins) {
+ if (has_ins_rotr) {
if (instr->IsShl()) {
__ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
__ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
@@ -1589,10 +1614,15 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Sra(dst_high, lhs_high, shift_value);
+ } else if (instr->IsUShr()) {
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_high, shift_value);
} else {
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Srl(dst_high, lhs_high, shift_value);
+ __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
}
} else {
if (instr->IsShl()) {
@@ -1605,24 +1635,51 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst_high, lhs_high, shift_value);
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(TMP, lhs_low, shift_value);
+ __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srl(TMP, lhs_high, shift_value);
+ __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Or(dst_high, dst_high, TMP);
}
}
} else {
- shift_value -= kMipsBitsPerWord;
+ const uint32_t shift_value_high = shift_value - kMipsBitsPerWord;
if (instr->IsShl()) {
- __ Sll(dst_high, lhs_low, shift_value);
+ __ Sll(dst_high, lhs_low, shift_value_high);
__ Move(dst_low, ZERO);
} else if (instr->IsShr()) {
- __ Sra(dst_low, lhs_high, shift_value);
+ __ Sra(dst_low, lhs_high, shift_value_high);
__ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
- } else {
- __ Srl(dst_low, lhs_high, shift_value);
+ } else if (instr->IsUShr()) {
+ __ Srl(dst_low, lhs_high, shift_value_high);
__ Move(dst_high, ZERO);
+ } else {
+ if (shift_value == kMipsBitsPerWord) {
+ // 64-bit rotation by 32 is just a swap.
+ __ Move(dst_low, lhs_high);
+ __ Move(dst_high, lhs_low);
+ } else {
+ if (has_ins_rotr) {
+ __ Srl(dst_low, lhs_high, shift_value_high);
+ __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value_high, shift_value_high);
+ __ Srl(dst_high, lhs_low, shift_value_high);
+ __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value_high, shift_value_high);
+ } else {
+ __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value_high);
+ __ Srl(dst_low, lhs_high, shift_value_high);
+ __ Or(dst_low, dst_low, TMP);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value_high);
+ __ Srl(dst_high, lhs_low, shift_value_high);
+ __ Or(dst_high, dst_high, TMP);
+ }
+ }
}
}
} else {
@@ -1649,7 +1706,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst_high, lhs_high, rhs_reg);
__ Nor(AT, ZERO, rhs_reg);
__ Sll(TMP, lhs_high, 1);
@@ -1660,6 +1717,21 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
+ } else {
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srlv(TMP, lhs_low, rhs_reg);
+ __ Sll(dst_low, lhs_high, 1);
+ __ Sllv(dst_low, dst_low, AT);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srlv(TMP, lhs_high, rhs_reg);
+ __ Sll(dst_high, lhs_low, 1);
+ __ Sllv(dst_high, dst_high, AT);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(TMP, dst_high);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, TMP);
}
__ Bind(&done);
}
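
On 32-bit MIPS a long lives in a register pair, so the constant-rotation paths above split into three cases: rotate by exactly 32 (a plain swap), by more than 32 (swap plus a smaller rotate), and by less than 32. A C++ sketch of the same case analysis, for illustration only:

#include <cstdint>

struct RegPair { uint32_t lo, hi; };

// 64-bit rotate right on a 32-bit register pair.
inline RegPair RotateRight64(RegPair in, uint32_t n) {
  n &= 63u;
  if (n == 0) return in;
  if (n == 32) return {in.hi, in.lo};            // plain swap
  if (n > 32) { in = {in.hi, in.lo}; n -= 32; }  // reduce to a rotate by n - 32
  return {(in.lo >> n) | (in.hi << (32u - n)),   // new low half
          (in.hi >> n) | (in.lo << (32u - n))};  // new high half
}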
@@ -2314,8 +2386,7 @@ void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruct
Register out = locations->Out().AsRegister<Register>();
Register dividend = locations->InAt(0).AsRegister<Register>();
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
- DCHECK(IsPowerOfTwo(abs_imm));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
int ctz_imm = CTZ(abs_imm);
if (instruction->IsDiv()) {
@@ -2418,7 +2489,7 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* inst
// Do not generate anything. DivZeroCheck would prevent any code from being executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(std::abs(imm))) {
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
DivRemByPowerOfTwo(instruction);
} else {
DCHECK(imm <= -2 || imm >= 2);
@@ -3358,8 +3429,8 @@ void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathMIPS(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -3371,6 +3442,10 @@ void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ Nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
@@ -3457,8 +3532,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
// Need to move to FP regs since FP results are returned in core registers.
__ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
locations->Out().AsFpuRegister<FRegister>());
- __ Mthc1(locations->GetTemp(2).AsRegister<Register>(),
- locations->Out().AsFpuRegister<FRegister>());
+ __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
}
} else {
if (!Primitive::IsFloatingPointType(type)) {
@@ -3578,8 +3653,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
// Pass FP parameters in core registers.
__ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
locations->InAt(1).AsFpuRegister<FRegister>());
- __ Mfhc1(locations->GetTemp(2).AsRegister<Register>(),
- locations->InAt(1).AsFpuRegister<FRegister>());
+ __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
}
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
instruction,
@@ -4536,14 +4611,12 @@ void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UN
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS::VisitShl(HShl* shl) {
@@ -4731,6 +4804,7 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type input_type = conversion->GetInputType();
Primitive::Type result_type = conversion->GetResultType();
DCHECK_NE(input_type, result_type);
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
(result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
@@ -4738,8 +4812,9 @@ void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
}
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
- (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+ if (!isR6 &&
+ ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+ (result_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(input_type)))) {
call_kind = LocationSummary::kCall;
}
@@ -4777,6 +4852,8 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ bool fpu_32bit = codegen_->GetInstructionSetFeatures().Is32BitFloatingPoint();
DCHECK_NE(input_type, result_type);
@@ -4822,7 +4899,37 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi
<< " to " << result_type;
}
} else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
- if (input_type != Primitive::kPrimLong) {
+ if (input_type == Primitive::kPrimLong) {
+ if (isR6) {
+ // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
+ // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ __ Mtc1(src_low, FTMP);
+ __ Mthc1(src_high, FTMP);
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsl(dst, FTMP);
+ } else {
+ __ Cvtdl(dst, FTMP);
+ }
+ } else {
+ int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+ : QUICK_ENTRY_POINT(pL2d);
+ bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
+ : IsDirectEntrypoint(kQuickL2d);
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ } else {
+ CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+ }
+ }
+ } else {
Register src = locations->InAt(0).AsRegister<Register>();
FRegister dst = locations->Out().AsFpuRegister<FRegister>();
__ Mtc1(src, FTMP);
@@ -4831,54 +4938,168 @@ void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversi
} else {
__ Cvtdw(dst, FTMP);
}
- } else {
- int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
- : QUICK_ENTRY_POINT(pL2d);
- bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
- : IsDirectEntrypoint(kQuickL2d);
- codegen_->InvokeRuntime(entry_offset,
- conversion,
- conversion->GetDexPc(),
- nullptr,
- direct);
- if (result_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickL2f, float, int64_t>();
- } else {
- CheckEntrypointTypes<kQuickL2d, double, int64_t>();
- }
}
} else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
- int32_t entry_offset;
- bool direct;
- if (result_type != Primitive::kPrimLong) {
- entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
- : QUICK_ENTRY_POINT(pD2iz);
- direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2iz)
- : IsDirectEntrypoint(kQuickD2iz);
+ if (result_type == Primitive::kPrimLong) {
+ if (isR6) {
+ // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
+ // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ MipsLabel truncate;
+ MipsLabel done;
+
+ // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
+ // value when the input is either a NaN or is outside of the range of the output type
+ // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
+ // the same result.
+ //
+ // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
+ // value of the output type if the input is outside of the range after the truncation or
+ // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
+ // results. This matches the desired float/double-to-int/long conversion exactly.
+ //
+ // So, NAN2008 affects handling of negative values and NaNs by the truncate instruction.
+ //
+ // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
+ // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
+ // even though it must be NAN2008=1 on R6.
+ //
+ // The code takes care of the different behaviors by first comparing the input to the
+ // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
+ // If the input is greater than or equal to the minimum, it proceeds to the truncate
+ // instruction, which will handle such an input the same way irrespective of NAN2008.
+ // Otherwise the input is compared to itself to determine whether it is a NaN or not
+ // in order to return either zero or the minimum value.
+ //
+ // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
+ // truncate instruction for MIPS64R6.
+ if (input_type == Primitive::kPrimFloat) {
+ uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int64_t>::min());
+ __ LoadConst32(TMP, min_val);
+ __ Mtc1(TMP, FTMP);
+ __ CmpLeS(FTMP, FTMP, src);
+ } else {
+ uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int64_t>::min());
+ __ LoadConst32(TMP, High32Bits(min_val));
+ __ Mtc1(ZERO, FTMP);
+ __ Mthc1(TMP, FTMP);
+ __ CmpLeD(FTMP, FTMP, src);
+ }
+
+ __ Bc1nez(FTMP, &truncate);
+
+ if (input_type == Primitive::kPrimFloat) {
+ __ CmpEqS(FTMP, src, src);
+ } else {
+ __ CmpEqD(FTMP, src, src);
+ }
+ __ Move(dst_low, ZERO);
+ __ LoadConst32(dst_high, std::numeric_limits<int32_t>::min());
+ __ Mfc1(TMP, FTMP);
+ __ And(dst_high, dst_high, TMP);
+
+ __ B(&done);
+
+ __ Bind(&truncate);
+
+ if (input_type == Primitive::kPrimFloat) {
+ __ TruncLS(FTMP, src);
+ } else {
+ __ TruncLD(FTMP, src);
+ }
+ __ Mfc1(dst_low, FTMP);
+ __ Mfhc1(dst_high, FTMP);
+
+ __ Bind(&done);
+ } else {
+ int32_t entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+ : QUICK_ENTRY_POINT(pD2l);
+ bool direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
+ : IsDirectEntrypoint(kQuickD2l);
+ codegen_->InvokeRuntime(entry_offset, conversion, conversion->GetDexPc(), nullptr, direct);
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ }
+ }
} else {
- entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
- : QUICK_ENTRY_POINT(pD2l);
- direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
- : IsDirectEntrypoint(kQuickD2l);
- }
- codegen_->InvokeRuntime(entry_offset,
- conversion,
- conversion->GetDexPc(),
- nullptr,
- direct);
- if (result_type != Primitive::kPrimLong) {
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ Register dst = locations->Out().AsRegister<Register>();
+ MipsLabel truncate;
+ MipsLabel done;
+
+ // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
+ // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
+ // even though it must be NAN2008=1 on R6.
+ //
+ // For details see the large comment above for the truncation of float/double to long on R6.
+ //
+ // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
+ // truncate instruction for MIPS64R6.
if (input_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+ uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
+ __ LoadConst32(TMP, min_val);
+ __ Mtc1(TMP, FTMP);
} else {
- CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+ uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
+ __ LoadConst32(TMP, High32Bits(min_val));
+ __ Mtc1(ZERO, FTMP);
+ if (fpu_32bit) {
+ __ Mtc1(TMP, static_cast<FRegister>(FTMP + 1));
+ } else {
+ __ Mthc1(TMP, FTMP);
+ }
}
- } else {
+
+ if (isR6) {
+ if (input_type == Primitive::kPrimFloat) {
+ __ CmpLeS(FTMP, FTMP, src);
+ } else {
+ __ CmpLeD(FTMP, FTMP, src);
+ }
+ __ Bc1nez(FTMP, &truncate);
+
+ if (input_type == Primitive::kPrimFloat) {
+ __ CmpEqS(FTMP, src, src);
+ } else {
+ __ CmpEqD(FTMP, src, src);
+ }
+ __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
+ __ Mfc1(TMP, FTMP);
+ __ And(dst, dst, TMP);
+ } else {
+ if (input_type == Primitive::kPrimFloat) {
+ __ ColeS(0, FTMP, src);
+ } else {
+ __ ColeD(0, FTMP, src);
+ }
+ __ Bc1t(0, &truncate);
+
+ if (input_type == Primitive::kPrimFloat) {
+ __ CeqS(0, src, src);
+ } else {
+ __ CeqD(0, src, src);
+ }
+ __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
+ __ Movf(dst, ZERO, 0);
+ }
+
+ __ B(&done);
+
+ __ Bind(&truncate);
+
if (input_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ __ TruncWS(FTMP, src);
} else {
- CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ __ TruncWD(FTMP, src);
}
+ __ Mfc1(dst, FTMP);
+
+ __ Bind(&done);
}
} else if (Primitive::IsFloatingPointType(result_type) &&
Primitive::IsFloatingPointType(input_type)) {
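
Taken together, the compare/branch/truncate sequences above (and their MIPS64 counterparts later in this change) implement the Java conversion semantics regardless of the CPU's NAN2008 mode. A sketch of the target semantics for double to long; the float and int variants are analogous:

#include <cmath>
#include <cstdint>
#include <limits>

// NaN -> 0, below range -> min, above range -> max (the truncate instruction
// caps it), otherwise truncation toward zero.
inline int64_t DoubleToLong(double in) {
  constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
  constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
  if (std::isnan(in)) return 0;
  if (in < static_cast<double>(kMin)) return kMin;
  if (in >= -static_cast<double>(kMin)) return kMax;  // -kMin == 2^63 exactly
  return static_cast<int64_t>(in);                    // the truncate path
}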
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 38302ad315..c3d4851ee9 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -197,7 +197,7 @@ class LocationsBuilderMIPS : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
};
-class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 05834ff063..38c32cad06 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -391,24 +391,24 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};
@@ -1113,7 +1113,7 @@ void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruc
InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
CodeGeneratorMIPS64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1247,7 +1247,7 @@ void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instructio
}
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1265,7 +1265,7 @@ void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
}
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1290,13 +1290,19 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
: static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
- if (type == Primitive::kPrimInt) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (type == Primitive::kPrimInt) {
if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ __ Rotr(dst, lhs, shift_value);
}
} else {
if (shift_value < 32) {
@@ -1304,8 +1310,10 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Dsll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl(dst, lhs, shift_value);
+ } else {
+ __ Drotr(dst, lhs, shift_value);
}
} else {
shift_value -= 32;
@@ -1313,8 +1321,10 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Dsll32(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra32(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl32(dst, lhs, shift_value);
+ } else {
+ __ Drotr32(dst, lhs, shift_value);
}
}
}
@@ -1324,16 +1334,20 @@ void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ __ Rotrv(dst, lhs, rhs_reg);
}
} else {
if (instr->IsShl()) {
__ Dsllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Dsrav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrlv(dst, lhs, rhs_reg);
+ } else {
+ __ Drotrv(dst, lhs, rhs_reg);
}
}
}
@@ -1955,8 +1969,7 @@ void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instru
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
int64_t imm = Int64FromConstant(second.GetConstant());
- uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
- DCHECK(IsPowerOfTwo(abs_imm));
+ uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
int ctz_imm = CTZ(abs_imm);
if (instruction->IsDiv()) {
@@ -2138,7 +2151,7 @@ void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* in
// Do not generate anything. DivZeroCheck would prevent any code from being executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(std::abs(imm))) {
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
DivRemByPowerOfTwo(instruction);
} else {
DCHECK(imm <= -2 || imm >= 2);
@@ -2736,9 +2749,8 @@ void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathMIPS64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -2750,6 +2762,10 @@ void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ Nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
@@ -3722,14 +3738,12 @@ void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
@@ -3918,36 +3932,18 @@ void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
}
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
- (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
- call_kind = LocationSummary::kCall;
- }
-
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);
- if (call_kind == LocationSummary::kNoCall) {
- if (Primitive::IsFloatingPointType(input_type)) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- }
-
- if (Primitive::IsFloatingPointType(result_type)) {
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
- } else {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
} else {
- InvokeRuntimeCallingConvention calling_convention;
-
- if (Primitive::IsFloatingPointType(input_type)) {
- locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
- } else {
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- }
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
- locations->SetOut(calling_convention.GetReturnLocation(result_type));
+ if (Primitive::IsFloatingPointType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -3992,55 +3988,107 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver
<< " to " << result_type;
}
} else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
- if (input_type != Primitive::kPrimLong) {
- FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
- GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
- __ Mtc1(src, FTMP);
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
+ if (input_type == Primitive::kPrimLong) {
+ __ Dmtc1(src, FTMP);
if (result_type == Primitive::kPrimFloat) {
- __ Cvtsw(dst, FTMP);
+ __ Cvtsl(dst, FTMP);
} else {
- __ Cvtdw(dst, FTMP);
+ __ Cvtdl(dst, FTMP);
}
} else {
- int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
- : QUICK_ENTRY_POINT(pL2d);
- codegen_->InvokeRuntime(entry_offset,
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ __ Mtc1(src, FTMP);
if (result_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ __ Cvtsw(dst, FTMP);
} else {
- CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+ __ Cvtdw(dst, FTMP);
}
}
} else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
- int32_t entry_offset;
- if (result_type != Primitive::kPrimLong) {
- entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
- : QUICK_ENTRY_POINT(pD2iz);
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ Mips64Label truncate;
+ Mips64Label done;
+
+ // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
+ // value when the input is either a NaN or is outside of the range of the output type
+ // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
+ // the same result.
+ //
+ // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
+ // value of the output type if the input is outside of the range after the truncation or
+ // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
+ // results. This matches the desired float/double-to-int/long conversion exactly.
+ //
+ // So, NAN2008 affects handling of negative values and NaNs by the truncate instruction.
+ //
+ // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
+ // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
+ // even though it must be NAN2008=1 on R6.
+ //
+ // The code takes care of the different behaviors by first comparing the input to the
+ // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
+ // If the input is greater than or equal to the minimum, it proceeds to the truncate
+ // instruction, which will handle such an input the same way irrespective of NAN2008.
+ // Otherwise the input is compared to itself to determine whether it is a NaN or not
+ // in order to return either zero or the minimum value.
+ //
+ // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
+ // truncate instruction for MIPS64R6.
+ if (input_type == Primitive::kPrimFloat) {
+ uint32_t min_val = (result_type == Primitive::kPrimLong)
+ ? bit_cast<uint32_t, float>(std::numeric_limits<int64_t>::min())
+ : bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
+ __ LoadConst32(TMP, min_val);
+ __ Mtc1(TMP, FTMP);
+ __ CmpLeS(FTMP, FTMP, src);
} else {
- entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
- : QUICK_ENTRY_POINT(pD2l);
+ uint64_t min_val = (result_type == Primitive::kPrimLong)
+ ? bit_cast<uint64_t, double>(std::numeric_limits<int64_t>::min())
+ : bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
+ __ LoadConst64(TMP, min_val);
+ __ Dmtc1(TMP, FTMP);
+ __ CmpLeD(FTMP, FTMP, src);
}
- codegen_->InvokeRuntime(entry_offset,
- conversion,
- conversion->GetDexPc(),
- nullptr);
- if (result_type != Primitive::kPrimLong) {
+
+ __ Bc1nez(FTMP, &truncate);
+
+ if (input_type == Primitive::kPrimFloat) {
+ __ CmpEqS(FTMP, src, src);
+ } else {
+ __ CmpEqD(FTMP, src, src);
+ }
+ if (result_type == Primitive::kPrimLong) {
+ __ LoadConst64(dst, std::numeric_limits<int64_t>::min());
+ } else {
+ __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
+ }
+ __ Mfc1(TMP, FTMP);
+ __ And(dst, dst, TMP);
+
+ __ Bc(&done);
+
+ __ Bind(&truncate);
+
+ if (result_type == Primitive::kPrimLong) {
if (input_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+ __ TruncLS(FTMP, src);
} else {
- CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+ __ TruncLD(FTMP, src);
}
+ __ Dmfc1(dst, FTMP);
} else {
if (input_type == Primitive::kPrimFloat) {
- CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ __ TruncWS(FTMP, src);
} else {
- CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ __ TruncWD(FTMP, src);
}
+ __ Mfc1(dst, FTMP);
}
+
+ __ Bind(&done);
} else if (Primitive::IsFloatingPointType(result_type) &&
Primitive::IsFloatingPointType(input_type)) {
FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 60ff96dc43..7182e8e987 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -201,7 +201,7 @@ class LocationsBuilderMIPS64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};
-class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 86327fb741..6ab3aaff4b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -365,11 +365,10 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
class DeoptimizationSlowPathX86 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- DCHECK(instruction_->IsDeoptimize());
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
@@ -383,7 +382,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
};
@@ -892,7 +891,7 @@ void CodeGeneratorX86::UpdateBlockedPairRegisters() const {
}
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1611,9 +1610,7 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -1625,6 +1622,10 @@ void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
@@ -3223,11 +3224,12 @@ void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
Register out_register = locations->Out().AsRegister<Register>();
Register input_register = locations->InAt(0).AsRegister<Register>();
int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
- DCHECK(IsPowerOfTwo(std::abs(imm)));
Register num = locations->GetTemp(0).AsRegister<Register>();
- __ leal(num, Address(input_register, std::abs(imm) - 1));
+ __ leal(num, Address(input_register, abs_imm - 1));
__ testl(input_register, input_register);
__ cmovl(kGreaterEqual, num, input_register);
int shift = CTZ(imm);
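
The leal/testl/cmovl sequence adds the |imm| - 1 bias only for negative dividends so that the arithmetic shift that follows rounds toward zero. A sketch of the whole algorithm (the shift and the negation for negative divisors fall outside this hunk; __builtin_ctz stands in for CTZ):

#include <cstdint>

// Signed division by a power-of-two constant, rounding toward zero.
inline int32_t DivByPowerOfTwo(int32_t dividend, int32_t imm) {
  uint32_t abs_imm = (imm == INT32_MIN) ? 0x80000000u
                                        : static_cast<uint32_t>(imm < 0 ? -imm : imm);
  int shift = __builtin_ctz(abs_imm);                          // CTZ(imm)
  int32_t num = dividend;
  if (dividend < 0) num += static_cast<int32_t>(abs_imm - 1);  // leal + cmovl bias
  int32_t result = num >> shift;                               // sarl (arithmetic shift)
  return (imm < 0) ? -result : result;                         // negl for negative divisors
}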
@@ -3340,7 +3342,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
// Do not generate anything for 0. DivZeroCheck would forbid any generated code.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+ } else if (is_div && IsPowerOfTwo(AbsOrMin(imm))) {
DivByPowerOfTwo(instruction->AsDiv());
} else {
DCHECK(imm <= -2 || imm >= 2);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index df7347658b..c65c423eae 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -178,7 +178,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};
-class InstructionCodeGeneratorX86 : public HGraphVisitor {
+class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 76a4ce2e93..294b40e3d4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -387,18 +387,16 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
class DeoptimizationSlowPathX86_64 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86_64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- deoptimize,
- deoptimize->GetDexPc(),
+ instruction_,
+ instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -406,7 +404,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
};
@@ -1000,7 +998,7 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph,
CodeGeneratorX86_64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1594,9 +1592,7 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86_64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -1608,6 +1604,10 @@ void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
}
void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ if (codegen_->HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ __ nop();
+ }
codegen_->RecordPcInfo(info, info->GetDexPc());
}
@@ -3350,13 +3350,13 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
int64_t imm = Int64FromConstant(second.GetConstant());
-
- DCHECK(IsPowerOfTwo(std::abs(imm)));
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint64_t abs_imm = AbsOrMin(imm);
CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
if (instruction->GetResultType() == Primitive::kPrimInt) {
- __ leal(tmp, Address(numerator, std::abs(imm) - 1));
+ __ leal(tmp, Address(numerator, abs_imm - 1));
__ testl(numerator, numerator);
__ cmov(kGreaterEqual, tmp, numerator);
int shift = CTZ(imm);
@@ -3371,7 +3371,7 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>();
- codegen_->Load64BitValue(rdx, std::abs(imm) - 1);
+ codegen_->Load64BitValue(rdx, abs_imm - 1);
__ addq(rdx, numerator);
__ testq(numerator, numerator);
__ cmov(kGreaterEqual, rdx, numerator);
@@ -3529,7 +3529,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (instruction->IsDiv() && IsPowerOfTwo(std::abs(imm))) {
+ } else if (instruction->IsDiv() && IsPowerOfTwo(AbsOrMin(imm))) {
DivByPowerOfTwo(instruction->AsDiv());
} else {
DCHECK(imm <= -2 || imm >= 2);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c5e8a04da6..505c9dcdad 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -183,7 +183,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};
-class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
+class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c504ded54c..b90afb1d73 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -211,19 +211,6 @@ bool InstructionSimplifierVisitor::ReplaceRotateWithRor(HBinaryOperation* op,
// Try to replace a binary operation flanked by one UShr and one Shl with a bitfield rotation.
bool InstructionSimplifierVisitor::TryReplaceWithRotate(HBinaryOperation* op) {
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return false;
- }
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr());
HInstruction* left = op->GetLeft();
HInstruction* right = op->GetRight();
@@ -1261,19 +1248,6 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, bool is_left) {
DCHECK(invoke->IsInvokeStaticOrDirect());
DCHECK_EQ(invoke->GetOriginalInvokeType(), InvokeType::kStatic);
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return;
- }
HInstruction* value = invoke->InputAt(0);
HInstruction* distance = invoke->InputAt(1);
// Replace the invoke with an HRor.
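With the ISA switch gone, the rotate rewrite fires for every backend, presumably because the MIPS code generators in this change now handle HRor as well. The shape being matched, written as a scalar C expression, is the classic rotation idiom:

    #include <cstdint>

    // What TryReplaceWithRotate/SimplifyRotate recognize (illustrative; the
    // pass matches HUShr/HShl operands of an Or/Add/Xor, not C expressions).
    uint32_t RotateRight(uint32_t x, uint32_t d) {
      d &= 31u;                                    // distances are taken mod the width
      return (x >> d) | (x << ((32u - d) & 31u));  // ushr | shl  ==>  a single ror
    }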
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 4683aee603..b1fbf28204 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -502,9 +502,6 @@ static void GenUnsafeGet(HInvoke* invoke,
bool is_volatile,
CodeGeneratorARM* codegen) {
LocationSummary* locations = invoke->GetLocations();
- DCHECK((type == Primitive::kPrimInt) ||
- (type == Primitive::kPrimLong) ||
- (type == Primitive::kPrimNot));
ArmAssembler* assembler = codegen->GetAssembler();
Location base_loc = locations->InAt(1);
Register base = base_loc.AsRegister<Register>(); // Object pointer.
@@ -512,30 +509,67 @@ static void GenUnsafeGet(HInvoke* invoke,
Register offset = offset_loc.AsRegisterPairLow<Register>(); // Long offset, lo part only.
Location trg_loc = locations->Out();
- if (type == Primitive::kPrimLong) {
- Register trg_lo = trg_loc.AsRegisterPairLow<Register>();
- __ add(IP, base, ShifterOperand(offset));
- if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
- Register trg_hi = trg_loc.AsRegisterPairHigh<Register>();
- __ ldrexd(trg_lo, trg_hi, IP);
- } else {
- __ ldrd(trg_lo, Address(IP));
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register trg = trg_loc.AsRegister<Register>();
+ __ ldr(trg, Address(base, offset));
+ if (is_volatile) {
+ __ dmb(ISH);
+ }
+ break;
}
- } else {
- Register trg = trg_loc.AsRegister<Register>();
- __ ldr(trg, Address(base, offset));
- }
- if (is_volatile) {
- __ dmb(ISH);
- }
+ case Primitive::kPrimNot: {
+ Register trg = trg_loc.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ if (kUseBakerReadBarrier) {
+ Location temp = locations->GetTemp(0);
+ codegen->GenerateArrayLoadWithBakerReadBarrier(
+ invoke, trg_loc, base, 0U, offset_loc, temp, /* needs_null_check */ false);
+ if (is_volatile) {
+ __ dmb(ISH);
+ }
+ } else {
+ __ ldr(trg, Address(base, offset));
+ if (is_volatile) {
+ __ dmb(ISH);
+ }
+ codegen->GenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
+ }
+ } else {
+ __ ldr(trg, Address(base, offset));
+ if (is_volatile) {
+ __ dmb(ISH);
+ }
+ __ MaybeUnpoisonHeapReference(trg);
+ }
+ break;
+ }
- if (type == Primitive::kPrimNot) {
- codegen->MaybeGenerateReadBarrier(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
+ case Primitive::kPrimLong: {
+ Register trg_lo = trg_loc.AsRegisterPairLow<Register>();
+ __ add(IP, base, ShifterOperand(offset));
+ if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
+ Register trg_hi = trg_loc.AsRegisterPairHigh<Register>();
+ __ ldrexd(trg_lo, trg_hi, IP);
+ } else {
+ __ ldrd(trg_lo, Address(IP));
+ }
+ if (is_volatile) {
+ __ dmb(ISH);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type " << type;
+ UNREACHABLE();
}
}
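All three cases now follow the same discipline for volatile gets: a plain load followed by DMB ISH, i.e. a load-acquire. In C++ memory-model terms, a conceptual equivalence (not generated code):

    #include <atomic>
    #include <cstdint>

    int32_t UnsafeGetIntVolatile(std::atomic<int32_t>* addr) {
      // ldr trg, [base, offset] ; dmb ish
      return addr->load(std::memory_order_acquire);
    }

In the Baker read-barrier arm of the kPrimNot case, the fence is likewise emitted after the barrier-instrumented load, so volatile ordering is preserved regardless of which load sequence runs.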
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Primitive::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
@@ -548,25 +582,30 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // We need a temporary register for the read barrier marking slow
+ // path in InstructionCodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}
void IntrinsicCodeGeneratorARM::VisitUnsafeGet(HInvoke* invoke) {
@@ -808,6 +847,9 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
}
// Prevent reordering with prior memory operations.
+ // Emit a DMB ISH instruction instead of a DMB ISHST one, as the
+ // latter allows a preceding load to be delayed past the STXR
+ // instruction below.
__ dmb(ISH);
__ add(tmp_ptr, base, ShifterOperand(offset));
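The comment documents a real hazard: DMB ISHST orders only store-to-store, so a load issued before the barrier could still be reordered past the STXR that publishes the CAS result. Sketched in pseudo-assembly (hypothetical registers):

    ldr   r1, [rX]            ; earlier load of the expected value
    dmb   ishst               ; store-only barrier: the ldr may still sink below...
    loop:
    ldrex r2, [tmp_ptr]
    ...
    strex r3, r4, [tmp_ptr]   ; ...this store, breaking acquire/release expectations

DMB ISH is a full barrier, so the earlier load is ordered before the exclusive store.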
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index f723940444..81cab86c83 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1035,7 +1035,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
__ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
__ Cbnz(tmp_32, &loop_head);
} else {
- __ Dmb(InnerShareable, BarrierWrites);
+ // Emit a `Dmb(InnerShareable, BarrierAll)` (DMB ISH) instruction
+ // instead of a `Dmb(InnerShareable, BarrierWrites)` (DMB ISHST)
+ // one, as the latter allows a preceding load to be delayed past
+ // the STXR instruction below.
+ __ Dmb(InnerShareable, BarrierAll);
__ Bind(&loop_head);
// TODO: When `type == Primitive::kPrimNot`, add a read barrier for
// the reference stored in the object before attempting the CAS,
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 06fab616ad..bc126a2716 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -43,14 +43,18 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
-inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() {
+inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
return codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
}
-inline bool IntrinsicCodeGeneratorMIPS::IsR6() {
+inline bool IntrinsicCodeGeneratorMIPS::IsR6() const {
return codegen_->GetInstructionSetFeatures().IsR6();
}
+inline bool IntrinsicCodeGeneratorMIPS::Is32BitFPU() const {
+ return codegen_->GetInstructionSetFeatures().Is32BitFloatingPoint();
+}
+
#define __ codegen->GetAssembler()->
static void MoveFromReturnRegister(Location trg,
@@ -162,7 +166,7 @@ static void MoveFPToInt(LocationSummary* locations, bool is64bit, MipsAssembler*
Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
__ Mfc1(out_lo, in);
- __ Mfhc1(out_hi, in);
+ __ MoveFromFpuHigh(out_hi, in);
} else {
Register out = locations->Out().AsRegister<Register>();
@@ -204,7 +208,7 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, MipsAssembler*
Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
__ Mtc1(in_lo, out);
- __ Mthc1(in_hi, out);
+ __ MoveToFpuHigh(in_hi, out);
} else {
Register in = locations->InAt(0).AsRegister<Register>();
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index f86b0efe4a..575a7d0a23 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -67,8 +67,9 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
#undef OPTIMIZING_INTRINSICS
- bool IsR2OrNewer(void);
- bool IsR6(void);
+ bool IsR2OrNewer() const;
+ bool IsR6() const;
+ bool Is32BitFPU() const;
private:
MipsAssembler* GetAssembler();
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f5a7048b01..b80c6bde82 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2183,10 +2183,7 @@ void HInvoke::SetIntrinsic(Intrinsics intrinsic,
IntrinsicExceptions exceptions) {
intrinsic_ = intrinsic;
IntrinsicOptimizations opt(this);
- if (needs_env_or_cache == kNoEnvironmentOrCache) {
- opt.SetDoesNotNeedDexCache();
- opt.SetDoesNotNeedEnvironment();
- }
+
// Adjust method's side effects from intrinsic table.
switch (side_effects) {
case kNoSideEffects: SetSideEffects(SideEffects::None()); break;
@@ -2194,6 +2191,14 @@ void HInvoke::SetIntrinsic(Intrinsics intrinsic,
case kWriteSideEffects: SetSideEffects(SideEffects::AllWrites()); break;
case kAllSideEffects: SetSideEffects(SideEffects::AllExceptGCDependency()); break;
}
+
+ if (needs_env_or_cache == kNoEnvironmentOrCache) {
+ opt.SetDoesNotNeedDexCache();
+ opt.SetDoesNotNeedEnvironment();
+ } else {
+ // If we need an environment, that means there will be a call, which can trigger GC.
+ SetSideEffects(GetSideEffects().Union(SideEffects::CanTriggerGC()));
+ }
// Adjust method's exception status from intrinsic table.
switch (exceptions) {
case kNoThrow: SetCanThrow(false); break;
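The kNoEnvironmentOrCache check moved below the switch because its new else branch unions CanTriggerGC into the side effects, while the switch assigns them wholesale; a union performed before that assignment would be lost. Schematically:

    SetSideEffects(FromTable(side_effects));        // switch above: plain assignment
    if (needs_env_or_cache != kNoEnvironmentOrCache) {
      SetSideEffects(GetSideEffects().Union(SideEffects::CanTriggerGC()));  // must follow it
    }

(FromTable stands in for the switch; it is not a real helper.)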
@@ -2325,4 +2330,19 @@ HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction*
}
}
+std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs) {
+ os << "["
+ << " source=" << rhs.GetSource()
+ << " destination=" << rhs.GetDestination()
+ << " type=" << rhs.GetType()
+ << " instruction=";
+ if (rhs.GetInstruction() != nullptr) {
+ os << rhs.GetInstruction()->DebugName() << ' ' << rhs.GetInstruction()->GetId();
+ } else {
+ os << "null";
+ }
+ os << " ]";
+ return os;
+}
+
} // namespace art
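The new printer makes diagnostics like the strengthened DCHECK in parallel_move_resolver.cc (further down in this change) readable. A representative use, with an assumed output shape (the exact source/destination text is whatever Location's own operator<< produces):

    DCHECK(other_move->IsPending()) << "move=" << *move << " other_move=" << *other_move;
    // e.g.  move=[ source=R0 destination=S4 type=PrimInt instruction=ParallelMove 12 ]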
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 59c07690b1..23132308f0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1881,6 +1881,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return false;
}
+ virtual bool IsActualObject() const {
+ return GetType() == Primitive::kPrimNot;
+ }
+
void SetReferenceTypeInfo(ReferenceTypeInfo rti);
ReferenceTypeInfo GetReferenceTypeInfo() const {
@@ -2500,8 +2504,10 @@ class HTryBoundary : public HTemplateInstruction<0> {
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
+ // We set CanTriggerGC to prevent any intermediate address from being live
+ // at the point of the `HDeoptimize`.
HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None(), dex_pc) {
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cond);
}
@@ -4017,8 +4023,10 @@ class HRem : public HBinaryOperation {
class HDivZeroCheck : public HExpression<1> {
public:
+ // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
+ // constructor.
HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -4539,8 +4547,10 @@ class HPhi : public HInstruction {
class HNullCheck : public HExpression<1> {
public:
+ // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
+ // constructor.
HNullCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -4861,8 +4871,10 @@ class HArrayLength : public HExpression<1> {
class HBoundsCheck : public HExpression<2> {
public:
+ // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
+ // constructor.
HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
- : HExpression(index->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK(index->GetType() == Primitive::kPrimInt);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
@@ -5626,8 +5638,8 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
}
bool IsPending() const {
- DCHECK(!source_.IsInvalid() || destination_.IsInvalid());
- return destination_.IsInvalid() && !source_.IsInvalid();
+ DCHECK(source_.IsValid() || destination_.IsInvalid());
+ return destination_.IsInvalid() && source_.IsValid();
}
// True if this blocks a move from the given location.
@@ -5671,6 +5683,8 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
HInstruction* instruction_;
};
+std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs);
+
static constexpr size_t kDefaultNumberOfMoves = 4;
class HParallelMove : public HTemplateInstruction<0> {
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 18405f2623..445cdab191 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -107,6 +107,7 @@ class HArm64IntermediateAddress : public HExpression<2> {
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+ bool IsActualObject() const OVERRIDE { return false; }
HInstruction* GetBaseAddress() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
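IsActualObject() is the hook the register-allocator DCHECKs added below key off: an HArm64IntermediateAddress produces base + data_offset, a raw interior pointer rather than a heap reference. Conceptually:

    intermediate = object + mirror::Array::DataOffset(...)   // not a GC-visitable reference
    element      = *(intermediate + (index << shift))

If a moving GC relocated `object` while `intermediate` sat in a stack-map slot typed as a reference, the slot could not be fixed up; the CanTriggerGC effects added to HDeoptimize, HNullCheck, HDivZeroCheck and HBoundsCheck above keep such intermediates from being live across those runtime calls.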
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cafc6c5440..bb840eabdd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -17,6 +17,7 @@
#include "optimizing_compiler.h"
#include <fstream>
+#include <memory>
#include <stdint.h>
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -52,6 +53,8 @@
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
+#include "dwarf/method_debug_info.h"
+#include "elf_writer_debug.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
@@ -60,6 +63,7 @@
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
+#include "jit/debugger_interface.h"
#include "jit/jit_code_cache.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
@@ -68,6 +72,7 @@
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "oat_quick_method_header.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
@@ -965,6 +970,39 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
+ if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
+ const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ CompiledMethod compiled_method(
+ GetCompilerDriver(),
+ codegen->GetInstructionSet(),
+ ArrayRef<const uint8_t>(code_allocator.GetMemory()),
+ codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
+ codegen->GetCoreSpillMask(),
+ codegen->GetFpuSpillMask(),
+ ArrayRef<const SrcMapElem>(),
+ ArrayRef<const uint8_t>(), // mapping_table.
+ ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
+ ArrayRef<const uint8_t>(), // native_gc_map.
+ ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+ ArrayRef<const LinkerPatch>());
+ dwarf::MethodDebugInfo method_debug_info {
+ dex_file,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ code_item,
+ false, // deduped.
+ code_address,
+ code_address + code_allocator.GetSize(),
+ &compiled_method
+ };
+ ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
+ CreateJITCodeEntryForAddress(code_address,
+ std::unique_ptr<const uint8_t[]>(elf_file.data()),
+ elf_file.size());
+ }
+
return true;
}
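CreateJITCodeEntryForAddress presumably chains the in-memory ELF image into the standard GDB JIT interface maintained by jit/debugger_interface.cc. For orientation, that interface (as documented in the GDB manual) looks like:

    extern "C" {
      typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

      struct jit_code_entry {
        jit_code_entry* next_entry;
        jit_code_entry* prev_entry;
        const char* symfile_addr;   // the debug ELF produced above
        uint64_t symfile_size;
      };

      struct jit_descriptor {
        uint32_t version;           // always 1
        uint32_t action_flag;       // a jit_actions_t value
        jit_code_entry* relevant_entry;
        jit_code_entry* first_entry;
      };

      // The debugger sets a breakpoint here; the JIT calls it after each update.
      void __jit_debug_register_code();
    }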
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 176c50ce21..9d136f3ae6 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <iostream>
#include "parallel_move_resolver.h"
@@ -172,7 +171,7 @@ MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
i = -1;
} else if (required_swap != nullptr) {
// A move is required to swap. We walk back the cycle to find the
- // move by just returning from this `PerforrmMove`.
+ // move by just returning from this `PerformMove`.
moves_[index]->ClearPending(destination);
return required_swap;
}
@@ -201,7 +200,7 @@ MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
} else {
for (MoveOperands* other_move : moves_) {
if (other_move->Blocks(destination)) {
- DCHECK(other_move->IsPending());
+ DCHECK(other_move->IsPending()) << "move=" << *move << " other_move=" << *other_move;
if (!move->Is64BitMove() && other_move->Is64BitMove()) {
// We swap 64bits moves before swapping 32bits moves. Go back from the
// cycle by returning the move that must be swapped.
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index d1770b75ab..63ef600756 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -96,7 +96,7 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
if (can_merge_with_load_class && !load_class->HasUses()) {
load_class->GetBlock()->RemoveInstruction(load_class);
}
- } else if (can_merge_with_load_class) {
+ } else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
load_class->SetMustGenerateClinitCheck(true);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5ab4547e22..2bae4bc5c8 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1679,6 +1679,9 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
@@ -1691,6 +1694,9 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
maximum_number_of_live_fp_registers_);
}
if (current->GetType() == Primitive::kPrimNot) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetRegisterBit(source.reg());
}
break;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index c60a4eacaa..4784de1380 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -270,7 +270,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
stack_map.SetStackMask(stack_map_encoding_, *entry.sp_mask);
}
- if (entry.num_dex_registers == 0) {
+ if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
// No dex map available.
stack_map.SetDexRegisterMapOffset(stack_map_encoding_, StackMap::kNoDexRegisterMap);
} else {
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 560502fde6..604787fd92 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -614,6 +614,10 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
stream.EndStackMapEntry();
+ number_of_dex_registers = 1;
+ stream.BeginStackMapEntry(1, 67, 0x4, &sp_mask, number_of_dex_registers, 0);
+ stream.EndStackMapEntry();
+
size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -622,7 +626,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
CodeInfo code_info(region);
StackMapEncoding encoding = code_info.ExtractEncoding();
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+ ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(0u, number_of_location_catalog_entries);
@@ -638,6 +642,16 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding));
ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+
+ stack_map = code_info.GetStackMapAt(1, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(67, encoding)));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(67u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding));
+
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
TEST(StackMapTest, InlineTest) {
diff --git a/compiler/profile_assistant.cc b/compiler/profile_assistant.cc
new file mode 100644
index 0000000000..81f2a5692d
--- /dev/null
+++ b/compiler/profile_assistant.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profile_assistant.h"
+
+namespace art {
+
+// Minimum number of new methods that profiles must contain to enable recompilation.
+static constexpr uint32_t kMinNewMethodsForCompilation = 10;
+
+bool ProfileAssistant::ProcessProfiles(
+ const std::vector<std::string>& profile_files,
+ const std::vector<std::string>& reference_profile_files,
+ /*out*/ ProfileCompilationInfo** profile_compilation_info) {
+ DCHECK(!profile_files.empty());
+ DCHECK(reference_profile_files.empty() ||
+ (profile_files.size() == reference_profile_files.size()));
+
+ std::vector<ProfileCompilationInfo> new_info(profile_files.size());
+ bool should_compile = false;
+ // Read the main profile files.
+ for (size_t i = 0; i < profile_files.size(); i++) {
+ if (!new_info[i].Load(profile_files[i])) {
+ LOG(WARNING) << "Could not load profile file: " << profile_files[i];
+ return false;
+ }
+ // Do we have enough new profiled methods to make the compilation worthwhile?
+ should_compile |= (new_info[i].GetNumberOfMethods() > kMinNewMethodsForCompilation);
+ }
+ if (!should_compile) {
+ *profile_compilation_info = nullptr;
+ return true;
+ }
+
+ std::unique_ptr<ProfileCompilationInfo> result(new ProfileCompilationInfo());
+ for (size_t i = 0; i < new_info.size(); i++) {
+ // Merge all data into a single object.
+ result->Load(new_info[i]);
+ // If we have any reference profiles, merge their information with
+ // the current profiles and save the result back to disk.
+ if (!reference_profile_files.empty()) {
+ if (!new_info[i].Load(reference_profile_files[i])) {
+ LOG(WARNING) << "Could not load reference profile file: " << reference_profile_files[i];
+ return false;
+ }
+ if (!new_info[i].Save(reference_profile_files[i])) {
+ LOG(WARNING) << "Could not save reference profile file: " << reference_profile_files[i];
+ return false;
+ }
+ }
+ }
+ *profile_compilation_info = result.release();
+ return true;
+}
+
+} // namespace art
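A sketch of how a caller would drive ProcessProfiles; the file paths and the surrounding dex2oat plumbing are assumptions for illustration:

    std::vector<std::string> cur = { "/data/misc/profiles/cur/0/com.app/primary.prof" };
    std::vector<std::string> ref = { "/data/misc/profiles/ref/com.app/primary.prof" };
    ProfileCompilationInfo* raw = nullptr;
    if (!ProfileAssistant::ProcessProfiles(cur, ref, &raw)) {
      LOG(WARNING) << "Profile analysis failed";
    } else if (raw == nullptr) {
      // Fewer than kMinNewMethodsForCompilation new methods: skip recompilation.
    } else {
      std::unique_ptr<ProfileCompilationInfo> info(raw);  // caller owns the result
      // ... hand info.get() to the compiler driver ...
    }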
diff --git a/compiler/profile_assistant.h b/compiler/profile_assistant.h
new file mode 100644
index 0000000000..088c8bd1c7
--- /dev/null
+++ b/compiler/profile_assistant.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_PROFILE_ASSISTANT_H_
+#define ART_COMPILER_PROFILE_ASSISTANT_H_
+
+#include <string>
+#include <vector>
+
+#include "jit/offline_profiling_info.cc"
+
+namespace art {
+
+class ProfileAssistant {
+ public:
+ // Process the profile information present in the given files. Returns true
+ // if the analysis completed successfully (i.e. no errors during reading,
+ // merging or writing of profile files).
+ //
+ // If the returned value is true and there is a significant difference between
+ // profile_files and reference_profile_files:
+ // - profile_compilation_info is set to a non-null object that
+ // can be used to drive compilation. It will be the merge of all the data
+ // found in profile_files and reference_profile_files.
+ // - the data from profile_files[i] is merged into
+ // reference_profile_files[i] and the corresponding backing file is
+ // updated.
+ //
+ // If the returned value is false or the difference is insignificant,
+ // profile_compilation_info will be set to null.
+ //
+ // Additional notes:
+ // - as mentioned above, this function may update the content of the files
+ // passed via reference_profile_files.
+ // - if reference_profile_files is not empty it must be the same size as
+ // profile_files.
+ static bool ProcessProfiles(
+ const std::vector<std::string>& profile_files,
+ const std::vector<std::string>& reference_profile_files,
+ /*out*/ ProfileCompilationInfo** profile_compilation_info);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_PROFILE_ASSISTANT_H_
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index b79c2f0f4e..f96376d9fe 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -501,6 +501,8 @@ class ArmAssembler : public Assembler {
virtual void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ // Note: CMN updates the flags based on the addition of its operands. Do not
+ // confuse the "N" suffix with the bitwise inversion performed by MVN.
virtual void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index f341030c15..52023a67ee 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -3428,10 +3428,10 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
CHECK(rn != IP);
// If rd != rn, use rd as temp. This allows 16-bit ADD/SUB in more situations than using IP.
Register temp = (rd != rn) ? rd : IP;
- if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, set_cc, &shifter_op)) {
+ if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
mvn(temp, shifter_op, cond, kCcKeep);
add(rd, rn, ShifterOperand(temp), cond, set_cc);
- } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), set_cc, &shifter_op)) {
+ } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
mvn(temp, shifter_op, cond, kCcKeep);
sub(rd, rn, ShifterOperand(temp), cond, set_cc);
} else if (High16Bits(-value) == 0) {
@@ -3449,22 +3449,32 @@ void Thumb2Assembler::AddConstant(Register rd, Register rn, int32_t value,
}
void Thumb2Assembler::CmpConstant(Register rn, int32_t value, Condition cond) {
- // We prefer to select the shorter code sequence rather than selecting add for
- // positive values and sub for negatives ones, which would slightly improve
- // the readability of generated code for some constants.
+ // We prefer the shorter code sequence, even though plain CMP and CMN would
+ // slightly improve the readability of generated code for some constants.
ShifterOperand shifter_op;
if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, kCcSet, &shifter_op)) {
cmp(rn, shifter_op, cond);
- } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, kCcSet, &shifter_op)) {
+ } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, -value, kCcSet, &shifter_op)) {
cmn(rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
+ if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
+ mvn(IP, shifter_op, cond, kCcKeep);
+ cmp(rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
+ mvn(IP, shifter_op, cond, kCcKeep);
+ cmn(rn, ShifterOperand(IP), cond);
+ } else if (High16Bits(-value) == 0) {
+ movw(IP, Low16Bits(-value), cond);
+ cmn(rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ cmp(rn, ShifterOperand(IP), cond);
}
- cmp(rn, ShifterOperand(IP), cond);
}
}
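A worked example of the new fallback ladder, taken from the expected test output below: for CmpConstant(r0, 257), #257 is not a Thumb2 modified immediate, and CMN would need the unencodable #-257; but ~(-257) == 256 is encodable, so the constant is materialized with MVN:

    mvn.w ip, #256    ; ip = ~256 = -257
    cmn.w r0, ip      ; flags of r0 + (-257), i.e. of r0 - 257

Symmetrically, CmpConstant(r0, -257) emits mvn.w ip, #256 followed by cmp r0, ip, and only constants that fit none of the CMP/CMN/MVN/MOVW forms fall through to MOVW+MOVT+CMP.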
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 0ef0dc19e6..2df9b177bf 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1626,6 +1626,76 @@ TEST(Thumb2AssemblerTest, AddConstant) {
EmitAndCheck(&assembler, "AddConstant");
}
+TEST(Thumb2AssemblerTest, CmpConstant) {
+ arm::Thumb2Assembler assembler;
+
+ __ CmpConstant(R0, 0); // 16-bit CMP.
+ __ CmpConstant(R1, 1); // 16-bit CMP.
+ __ CmpConstant(R0, 7); // 16-bit CMP.
+ __ CmpConstant(R1, 8); // 16-bit CMP.
+ __ CmpConstant(R0, 255); // 16-bit CMP.
+ __ CmpConstant(R1, 256); // 32-bit CMP.
+ __ CmpConstant(R0, 257); // MVN+CMN.
+ __ CmpConstant(R1, 0xfff); // MOVW+CMP.
+ __ CmpConstant(R0, 0x1000); // 32-bit CMP.
+ __ CmpConstant(R1, 0x1001); // MVN+CMN.
+ __ CmpConstant(R0, 0x1002); // MOVW+CMP.
+ __ CmpConstant(R1, 0xffff); // MOVW+CMP.
+ __ CmpConstant(R0, 0x10000); // 32-bit CMP.
+ __ CmpConstant(R1, 0x10001); // 32-bit CMP.
+ __ CmpConstant(R0, 0x10002); // MVN+CMN.
+ __ CmpConstant(R1, 0x10003); // MOVW+MOVT+CMP.
+ __ CmpConstant(R0, -1); // 32-bit CMP.
+ __ CmpConstant(R1, -7); // CMN.
+ __ CmpConstant(R0, -8); // CMN.
+ __ CmpConstant(R1, -255); // CMN.
+ __ CmpConstant(R0, -256); // CMN.
+ __ CmpConstant(R1, -257); // MVN+CMP.
+ __ CmpConstant(R0, -0xfff); // MOVW+CMN.
+ __ CmpConstant(R1, -0x1000); // CMN.
+ __ CmpConstant(R0, -0x1001); // MVN+CMP.
+ __ CmpConstant(R1, -0x1002); // MOVW+CMN.
+ __ CmpConstant(R0, -0xffff); // MOVW+CMN.
+ __ CmpConstant(R1, -0x10000); // CMN.
+ __ CmpConstant(R0, -0x10001); // CMN.
+ __ CmpConstant(R1, -0x10002); // MVN+CMP.
+ __ CmpConstant(R0, -0x10003); // MOVW+MOVT+CMP.
+
+ __ CmpConstant(R8, 0); // 32-bit CMP.
+ __ CmpConstant(R9, 1); // 32-bit CMP.
+ __ CmpConstant(R8, 7); // 32-bit CMP.
+ __ CmpConstant(R9, 8); // 32-bit CMP.
+ __ CmpConstant(R8, 255); // 32-bit CMP.
+ __ CmpConstant(R9, 256); // 32-bit CMP.
+ __ CmpConstant(R8, 257); // MVN+CMN.
+ __ CmpConstant(R9, 0xfff); // MOVW+CMP.
+ __ CmpConstant(R8, 0x1000); // 32-bit CMP.
+ __ CmpConstant(R9, 0x1001); // MVN+CMN.
+ __ CmpConstant(R8, 0x1002); // MOVW+CMP.
+ __ CmpConstant(R9, 0xffff); // MOVW+CMP.
+ __ CmpConstant(R8, 0x10000); // 32-bit CMP.
+ __ CmpConstant(R9, 0x10001); // 32-bit CMP.
+ __ CmpConstant(R8, 0x10002); // MVN+CMN.
+ __ CmpConstant(R9, 0x10003); // MOVW+MOVT+CMP.
+ __ CmpConstant(R8, -1); // 32-bit CMP.
+ __ CmpConstant(R9, -7); // CMN.
+ __ CmpConstant(R8, -8); // CMN.
+ __ CmpConstant(R9, -255); // CMN.
+ __ CmpConstant(R8, -256); // CMN.
+ __ CmpConstant(R9, -257); // MVN+CMP.
+ __ CmpConstant(R8, -0xfff); // MOVW+CMN.
+ __ CmpConstant(R9, -0x1000); // CMN.
+ __ CmpConstant(R8, -0x1001); // MVN+CMP.
+ __ CmpConstant(R9, -0x1002); // MOVW+CMN.
+ __ CmpConstant(R8, -0xffff); // MOVW+CMN.
+ __ CmpConstant(R9, -0x10000); // CMN.
+ __ CmpConstant(R8, -0x10001); // CMN.
+ __ CmpConstant(R9, -0x10002); // MVN+CMP.
+ __ CmpConstant(R8, -0x10003); // MOVW+MOVT+CMP.
+
+ EmitAndCheck(&assembler, "CmpConstant");
+}
+
#undef __
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index f07f8c74d7..6736015bf1 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,4 +1,4 @@
-const char* SimpleMovResults[] = {
+const char* const SimpleMovResults[] = {
" 0: 0008 movs r0, r1\n",
" 2: 4608 mov r0, r1\n",
" 4: 46c8 mov r8, r9\n",
@@ -6,18 +6,18 @@ const char* SimpleMovResults[] = {
" 8: f04f 0809 mov.w r8, #9\n",
nullptr
};
-const char* SimpleMov32Results[] = {
+const char* const SimpleMov32Results[] = {
" 0: ea4f 0001 mov.w r0, r1\n",
" 4: ea4f 0809 mov.w r8, r9\n",
nullptr
};
-const char* SimpleMovAddResults[] = {
+const char* const SimpleMovAddResults[] = {
" 0: 4608 mov r0, r1\n",
" 2: 1888 adds r0, r1, r2\n",
" 4: 1c08 adds r0, r1, #0\n",
nullptr
};
-const char* DataProcessingRegisterResults[] = {
+const char* const DataProcessingRegisterResults[] = {
" 0: ea6f 0001 mvn.w r0, r1\n",
" 4: eb01 0002 add.w r0, r1, r2\n",
" 8: eba1 0002 sub.w r0, r1, r2\n",
@@ -129,7 +129,7 @@ const char* DataProcessingRegisterResults[] = {
" 120: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
-const char* DataProcessingImmediateResults[] = {
+const char* const DataProcessingImmediateResults[] = {
" 0: 2055 movs r0, #85 ; 0x55\n",
" 2: f06f 0055 mvn.w r0, #85 ; 0x55\n",
" 6: f101 0055 add.w r0, r1, #85 ; 0x55\n",
@@ -154,7 +154,7 @@ const char* DataProcessingImmediateResults[] = {
" 48: 1f48 subs r0, r1, #5\n",
nullptr
};
-const char* DataProcessingModifiedImmediateResults[] = {
+const char* const DataProcessingModifiedImmediateResults[] = {
" 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
" 4: f06f 1055 mvn.w r0, #5570645 ; 0x550055\n",
" 8: f101 1055 add.w r0, r1, #5570645 ; 0x550055\n",
@@ -173,7 +173,7 @@ const char* DataProcessingModifiedImmediateResults[] = {
" 3c: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
nullptr
};
-const char* DataProcessingModifiedImmediatesResults[] = {
+const char* const DataProcessingModifiedImmediatesResults[] = {
" 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
" 4: f04f 2055 mov.w r0, #1426085120 ; 0x55005500\n",
" 8: f04f 3055 mov.w r0, #1431655765 ; 0x55555555\n",
@@ -183,7 +183,7 @@ const char* DataProcessingModifiedImmediatesResults[] = {
" 18: f44f 70d4 mov.w r0, #424 ; 0x1a8\n",
nullptr
};
-const char* DataProcessingShiftedRegisterResults[] = {
+const char* const DataProcessingShiftedRegisterResults[] = {
" 0: 0123 lsls r3, r4, #4\n",
" 2: 0963 lsrs r3, r4, #5\n",
" 4: 11a3 asrs r3, r4, #6\n",
@@ -201,7 +201,7 @@ const char* DataProcessingShiftedRegisterResults[] = {
" 32: ea5f 0834 movs.w r8, r4, rrx\n",
nullptr
};
-const char* ShiftImmediateResults[] = {
+const char* const ShiftImmediateResults[] = {
" 0: 0123 lsls r3, r4, #4\n",
" 2: 0963 lsrs r3, r4, #5\n",
" 4: 11a3 asrs r3, r4, #6\n",
@@ -219,7 +219,7 @@ const char* ShiftImmediateResults[] = {
" 32: ea5f 0834 movs.w r8, r4, rrx\n",
nullptr
};
-const char* BasicLoadResults[] = {
+const char* const BasicLoadResults[] = {
" 0: 69a3 ldr r3, [r4, #24]\n",
" 2: 7e23 ldrb r3, [r4, #24]\n",
" 4: 8b23 ldrh r3, [r4, #24]\n",
@@ -233,7 +233,7 @@ const char* BasicLoadResults[] = {
" 20: f9b4 8018 ldrsh.w r8, [r4, #24]\n",
nullptr
};
-const char* BasicStoreResults[] = {
+const char* const BasicStoreResults[] = {
" 0: 61a3 str r3, [r4, #24]\n",
" 2: 7623 strb r3, [r4, #24]\n",
" 4: 8323 strh r3, [r4, #24]\n",
@@ -243,7 +243,7 @@ const char* BasicStoreResults[] = {
" 10: f8a4 8018 strh.w r8, [r4, #24]\n",
nullptr
};
-const char* ComplexLoadResults[] = {
+const char* const ComplexLoadResults[] = {
" 0: 69a3 ldr r3, [r4, #24]\n",
" 2: f854 3f18 ldr.w r3, [r4, #24]!\n",
" 6: f854 3b18 ldr.w r3, [r4], #24\n",
@@ -276,7 +276,7 @@ const char* ComplexLoadResults[] = {
" 6e: f934 3918 ldrsh.w r3, [r4], #-24\n",
nullptr
};
-const char* ComplexStoreResults[] = {
+const char* const ComplexStoreResults[] = {
" 0: 61a3 str r3, [r4, #24]\n",
" 2: f844 3f18 str.w r3, [r4, #24]!\n",
" 6: f844 3b18 str.w r3, [r4], #24\n",
@@ -297,7 +297,7 @@ const char* ComplexStoreResults[] = {
" 3e: f824 3918 strh.w r3, [r4], #-24\n",
nullptr
};
-const char* NegativeLoadStoreResults[] = {
+const char* const NegativeLoadStoreResults[] = {
" 0: f854 3c18 ldr.w r3, [r4, #-24]\n",
" 4: f854 3d18 ldr.w r3, [r4, #-24]!\n",
" 8: f854 3918 ldr.w r3, [r4], #-24\n",
@@ -348,12 +348,12 @@ const char* NegativeLoadStoreResults[] = {
" bc: f824 3b18 strh.w r3, [r4], #24\n",
nullptr
};
-const char* SimpleLoadStoreDualResults[] = {
+const char* const SimpleLoadStoreDualResults[] = {
" 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
" 4: e9d0 2306 ldrd r2, r3, [r0, #24]\n",
nullptr
};
-const char* ComplexLoadStoreDualResults[] = {
+const char* const ComplexLoadStoreDualResults[] = {
" 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
" 4: e9e0 2306 strd r2, r3, [r0, #24]!\n",
" 8: e8e0 2306 strd r2, r3, [r0], #24\n",
@@ -368,7 +368,7 @@ const char* ComplexLoadStoreDualResults[] = {
" 2c: e870 2306 ldrd r2, r3, [r0], #-24\n",
nullptr
};
-const char* NegativeLoadStoreDualResults[] = {
+const char* const NegativeLoadStoreDualResults[] = {
" 0: e940 2306 strd r2, r3, [r0, #-24]\n",
" 4: e960 2306 strd r2, r3, [r0, #-24]!\n",
" 8: e860 2306 strd r2, r3, [r0], #-24\n",
@@ -383,7 +383,7 @@ const char* NegativeLoadStoreDualResults[] = {
" 2c: e8f0 2306 ldrd r2, r3, [r0], #24\n",
nullptr
};
-const char* SimpleBranchResults[] = {
+const char* const SimpleBranchResults[] = {
" 0: 2002 movs r0, #2\n",
" 2: 2101 movs r1, #1\n",
" 4: e7fd b.n 2 <SimpleBranch+0x2>\n",
@@ -403,7 +403,7 @@ const char* SimpleBranchResults[] = {
" 20: 2006 movs r0, #6\n",
nullptr
};
-const char* LongBranchResults[] = {
+const char* const LongBranchResults[] = {
" 0: f04f 0002 mov.w r0, #2\n",
" 4: f04f 0101 mov.w r1, #1\n",
" 8: f7ff bffc b.w 4 <LongBranch+0x4>\n",
@@ -423,14 +423,14 @@ const char* LongBranchResults[] = {
" 40: f04f 0006 mov.w r0, #6\n",
nullptr
};
-const char* LoadMultipleResults[] = {
+const char* const LoadMultipleResults[] = {
" 0: cc09 ldmia r4!, {r0, r3}\n",
" 2: e934 4800 ldmdb r4!, {fp, lr}\n",
" 6: e914 4800 ldmdb r4, {fp, lr}\n",
" a: f854 5b04 ldr.w r5, [r4], #4\n",
nullptr
};
-const char* StoreMultipleResults[] = {
+const char* const StoreMultipleResults[] = {
" 0: c409 stmia r4!, {r0, r3}\n",
" 2: e8a4 4800 stmia.w r4!, {fp, lr}\n",
" 6: e884 4800 stmia.w r4, {fp, lr}\n",
@@ -438,7 +438,7 @@ const char* StoreMultipleResults[] = {
" e: f844 5d04 str.w r5, [r4, #-4]!\n",
nullptr
};
-const char* MovWMovTResults[] = {
+const char* const MovWMovTResults[] = {
" 0: f240 0400 movw r4, #0\n",
" 4: f240 0434 movw r4, #52 ; 0x34\n",
" 8: f240 0934 movw r9, #52 ; 0x34\n",
@@ -449,7 +449,7 @@ const char* MovWMovTResults[] = {
" 1c: f6cf 71ff movt r1, #65535 ; 0xffff\n",
nullptr
};
-const char* SpecialAddSubResults[] = {
+const char* const SpecialAddSubResults[] = {
" 0: aa14 add r2, sp, #80 ; 0x50\n",
" 2: b014 add sp, #80 ; 0x50\n",
" 4: f10d 0850 add.w r8, sp, #80 ; 0x50\n",
@@ -463,7 +463,7 @@ const char* SpecialAddSubResults[] = {
" 22: f6ad 7dfc subw sp, sp, #4092 ; 0xffc\n",
nullptr
};
-const char* LoadFromOffsetResults[] = {
+const char* const LoadFromOffsetResults[] = {
" 0: 68e2 ldr r2, [r4, #12]\n",
" 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n",
" 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
@@ -514,7 +514,7 @@ const char* LoadFromOffsetResults[] = {
" 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n",
nullptr
};
-const char* StoreToOffsetResults[] = {
+const char* const StoreToOffsetResults[] = {
" 0: 60e2 str r2, [r4, #12]\n",
" 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n",
" 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
@@ -563,7 +563,7 @@ const char* StoreToOffsetResults[] = {
" a4: 7322 strb r2, [r4, #12]\n",
nullptr
};
-const char* IfThenResults[] = {
+const char* const IfThenResults[] = {
" 0: bf08 it eq\n",
" 2: 2101 moveq r1, #1\n",
" 4: bf04 itt eq\n",
@@ -587,7 +587,7 @@ const char* IfThenResults[] = {
" 28: 2404 movne r4, #4\n",
nullptr
};
-const char* CbzCbnzResults[] = {
+const char* const CbzCbnzResults[] = {
" 0: b10a cbz r2, 6 <CbzCbnz+0x6>\n",
" 2: 2103 movs r1, #3\n",
" 4: 2203 movs r2, #3\n",
@@ -598,7 +598,7 @@ const char* CbzCbnzResults[] = {
" 10: 2204 movs r2, #4\n",
nullptr
};
-const char* MultiplyResults[] = {
+const char* const MultiplyResults[] = {
" 0: 4348 muls r0, r1\n",
" 2: fb01 f002 mul.w r0, r1, r2\n",
" 6: fb09 f808 mul.w r8, r9, r8\n",
@@ -611,21 +611,21 @@ const char* MultiplyResults[] = {
" 22: fbaa 890b umull r8, r9, sl, fp\n",
nullptr
};
-const char* DivideResults[] = {
+const char* const DivideResults[] = {
" 0: fb91 f0f2 sdiv r0, r1, r2\n",
" 4: fb99 f8fa sdiv r8, r9, sl\n",
" 8: fbb1 f0f2 udiv r0, r1, r2\n",
" c: fbb9 f8fa udiv r8, r9, sl\n",
nullptr
};
-const char* VMovResults[] = {
+const char* const VMovResults[] = {
" 0: eef7 0a00 vmov.f32 s1, #112 ; 0x70\n",
" 4: eeb7 1b00 vmov.f64 d1, #112 ; 0x70\n",
" 8: eef0 0a41 vmov.f32 s1, s2\n",
" c: eeb0 1b42 vmov.f64 d1, d2\n",
nullptr
};
-const char* BasicFloatingPointResults[] = {
+const char* const BasicFloatingPointResults[] = {
" 0: ee30 0a81 vadd.f32 s0, s1, s2\n",
" 4: ee30 0ac1 vsub.f32 s0, s1, s2\n",
" 8: ee20 0a81 vmul.f32 s0, s1, s2\n",
@@ -646,7 +646,7 @@ const char* BasicFloatingPointResults[] = {
" 44: eeb1 0bc1 vsqrt.f64 d0, d1\n",
nullptr
};
-const char* FloatingPointConversionsResults[] = {
+const char* const FloatingPointConversionsResults[] = {
" 0: eeb7 1bc2 vcvt.f32.f64 s2, d2\n",
" 4: eeb7 2ac1 vcvt.f64.f32 d2, s2\n",
" 8: eefd 0ac1 vcvt.s32.f32 s1, s2\n",
@@ -659,35 +659,35 @@ const char* FloatingPointConversionsResults[] = {
" 24: eeb8 1b41 vcvt.f64.u32 d1, s2\n",
nullptr
};
-const char* FloatingPointComparisonsResults[] = {
+const char* const FloatingPointComparisonsResults[] = {
" 0: eeb4 0a60 vcmp.f32 s0, s1\n",
" 4: eeb4 0b41 vcmp.f64 d0, d1\n",
" 8: eeb5 1a40 vcmp.f32 s2, #0.0\n",
" c: eeb5 2b40 vcmp.f64 d2, #0.0\n",
nullptr
};
-const char* CallsResults[] = {
+const char* const CallsResults[] = {
" 0: 47f0 blx lr\n",
" 2: 4770 bx lr\n",
nullptr
};
-const char* BreakpointResults[] = {
+const char* const BreakpointResults[] = {
" 0: be00 bkpt 0x0000\n",
nullptr
};
-const char* StrR1Results[] = {
+const char* const StrR1Results[] = {
" 0: 9111 str r1, [sp, #68] ; 0x44\n",
" 2: f8cd 142c str.w r1, [sp, #1068] ; 0x42c\n",
nullptr
};
-const char* VPushPopResults[] = {
+const char* const VPushPopResults[] = {
" 0: ed2d 1a04 vpush {s2-s5}\n",
" 4: ed2d 2b08 vpush {d2-d5}\n",
" 8: ecbd 1a04 vpop {s2-s5}\n",
" c: ecbd 2b08 vpop {d2-d5}\n",
nullptr
};
-const char* Max16BitBranchResults[] = {
+const char* const Max16BitBranchResults[] = {
" 0: e3ff b.n 802 <Max16BitBranch+0x802>\n",
" 2: 2300 movs r3, #0\n",
" 4: 2302 movs r3, #2\n",
@@ -1716,7 +1716,7 @@ const char* Max16BitBranchResults[] = {
" 802: 4611 mov r1, r2\n",
nullptr
};
-const char* Branch32Results[] = {
+const char* const Branch32Results[] = {
" 0: f000 bc01 b.w 806 <Branch32+0x806>\n",
" 4: 2300 movs r3, #0\n",
" 6: 2302 movs r3, #2\n",
@@ -2746,7 +2746,7 @@ const char* Branch32Results[] = {
" 806: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchMaxResults[] = {
+const char* const CompareAndBranchMaxResults[] = {
" 0: b3fc cbz r4, 82 <CompareAndBranchMax+0x82>\n",
" 2: 2300 movs r3, #0\n",
" 4: 2302 movs r3, #2\n",
@@ -2815,7 +2815,7 @@ const char* CompareAndBranchMaxResults[] = {
" 82: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchRelocation16Results[] = {
+const char* const CompareAndBranchRelocation16Results[] = {
" 0: 2c00 cmp r4, #0\n",
" 2: d040 beq.n 86 <CompareAndBranchRelocation16+0x86>\n",
" 4: 2300 movs r3, #0\n",
@@ -2886,7 +2886,7 @@ const char* CompareAndBranchRelocation16Results[] = {
" 86: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchRelocation32Results[] = {
+const char* const CompareAndBranchRelocation32Results[] = {
" 0: 2c00 cmp r4, #0\n",
" 2: f000 8401 beq.w 808 <CompareAndBranchRelocation32+0x808>\n",
" 6: 2300 movs r3, #0\n",
@@ -3917,7 +3917,7 @@ const char* CompareAndBranchRelocation32Results[] = {
" 808: 4611 mov r1, r2\n",
nullptr
};
-const char* MixedBranch32Results[] = {
+const char* const MixedBranch32Results[] = {
" 0: f000 bc03 b.w 80a <MixedBranch32+0x80a>\n",
" 4: 2300 movs r3, #0\n",
" 6: 2302 movs r3, #2\n",
@@ -4948,7 +4948,7 @@ const char* MixedBranch32Results[] = {
" 80a: 4611 mov r1, r2\n",
nullptr
};
-const char* ShiftsResults[] = {
+const char* const ShiftsResults[] = {
" 0: 0148 lsls r0, r1, #5\n",
" 2: 0948 lsrs r0, r1, #5\n",
" 4: 1148 asrs r0, r1, #5\n",
@@ -4997,7 +4997,7 @@ const char* ShiftsResults[] = {
" 98: fa51 f008 asrs.w r0, r1, r8\n",
nullptr
};
-const char* LoadStoreRegOffsetResults[] = {
+const char* const LoadStoreRegOffsetResults[] = {
" 0: 5888 ldr r0, [r1, r2]\n",
" 2: 5088 str r0, [r1, r2]\n",
" 4: f851 0012 ldr.w r0, [r1, r2, lsl #1]\n",
@@ -5012,7 +5012,7 @@ const char* LoadStoreRegOffsetResults[] = {
" 28: f841 0008 str.w r0, [r1, r8]\n",
nullptr
};
-const char* LoadStoreLiteralResults[] = {
+const char* const LoadStoreLiteralResults[] = {
" 0: 4801 ldr r0, [pc, #4] ; (8 <LoadStoreLiteral+0x8>)\n",
" 2: f8cf 0004 str.w r0, [pc, #4] ; 8 <LoadStoreLiteral+0x8>\n",
" 6: f85f 0008 ldr.w r0, [pc, #-8] ; 0 <LoadStoreLiteral>\n",
@@ -5023,7 +5023,7 @@ const char* LoadStoreLiteralResults[] = {
" 18: f8cf 07ff str.w r0, [pc, #2047] ; 81b <LoadStoreLiteral+0x81b>\n",
nullptr
};
-const char* LoadStoreLimitsResults[] = {
+const char* const LoadStoreLimitsResults[] = {
" 0: 6fe0 ldr r0, [r4, #124] ; 0x7c\n",
" 2: f8d4 0080 ldr.w r0, [r4, #128] ; 0x80\n",
" 6: 7fe0 ldrb r0, [r4, #31]\n",
@@ -5042,7 +5042,7 @@ const char* LoadStoreLimitsResults[] = {
" 30: f8a4 0040 strh.w r0, [r4, #64] ; 0x40\n",
nullptr
};
-const char* CompareAndBranchResults[] = {
+const char* const CompareAndBranchResults[] = {
" 0: b130 cbz r0, 10 <CompareAndBranch+0x10>\n",
" 2: f1bb 0f00 cmp.w fp, #0\n",
" 6: d003 beq.n 10 <CompareAndBranch+0x10>\n",
@@ -5052,7 +5052,7 @@ const char* CompareAndBranchResults[] = {
nullptr
};
-const char* AddConstantResults[] = {
+const char* const AddConstantResults[] = {
" 0: 4608 mov r0, r1\n",
" 2: 1c48 adds r0, r1, #1\n",
" 4: 1dc8 adds r0, r1, #7\n",
@@ -5370,6 +5370,104 @@ const char* AddConstantResults[] = {
nullptr
};
+const char* const CmpConstantResults[] = {
+ " 0: 2800 cmp r0, #0\n",
+ " 2: 2901 cmp r1, #1\n",
+ " 4: 2807 cmp r0, #7\n",
+ " 6: 2908 cmp r1, #8\n",
+ " 8: 28ff cmp r0, #255 ; 0xff\n",
+ " a: f5b1 7f80 cmp.w r1, #256 ; 0x100\n",
+ " e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 12: eb10 0f0c cmn.w r0, ip\n",
+ " 16: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 1a: 4561 cmp r1, ip\n",
+ " 1c: f5b0 5f80 cmp.w r0, #4096 ; 0x1000\n",
+ " 20: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 24: eb11 0f0c cmn.w r1, ip\n",
+ " 28: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 2c: 4560 cmp r0, ip\n",
+ " 2e: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 32: 4561 cmp r1, ip\n",
+ " 34: f5b0 3f80 cmp.w r0, #65536 ; 0x10000\n",
+ " 38: f1b1 1f01 cmp.w r1, #65537 ; 0x10001\n",
+ " 3c: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 40: eb10 0f0c cmn.w r0, ip\n",
+ " 44: f240 0c03 movw ip, #3\n",
+ " 48: f2c0 0c01 movt ip, #1\n",
+ " 4c: 4561 cmp r1, ip\n",
+ " 4e: f1b0 3fff cmp.w r0, #4294967295 ; 0xffffffff\n",
+ " 52: f111 0f07 cmn.w r1, #7\n",
+ " 56: f110 0f08 cmn.w r0, #8\n",
+ " 5a: f111 0fff cmn.w r1, #255 ; 0xff\n",
+ " 5e: f510 7f80 cmn.w r0, #256 ; 0x100\n",
+ " 62: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 66: 4561 cmp r1, ip\n",
+ " 68: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 6c: eb10 0f0c cmn.w r0, ip\n",
+ " 70: f511 5f80 cmn.w r1, #4096 ; 0x1000\n",
+ " 74: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 78: 4560 cmp r0, ip\n",
+ " 7a: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 7e: eb11 0f0c cmn.w r1, ip\n",
+ " 82: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 86: eb10 0f0c cmn.w r0, ip\n",
+ " 8a: f511 3f80 cmn.w r1, #65536 ; 0x10000\n",
+ " 8e: f110 1f01 cmn.w r0, #65537 ; 0x10001\n",
+ " 92: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 96: 4561 cmp r1, ip\n",
+ " 98: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 9c: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " a0: 4560 cmp r0, ip\n",
+ " a2: f1b8 0f00 cmp.w r8, #0\n",
+ " a6: f1b9 0f01 cmp.w r9, #1\n",
+ " aa: f1b8 0f07 cmp.w r8, #7\n",
+ " ae: f1b9 0f08 cmp.w r9, #8\n",
+ " b2: f1b8 0fff cmp.w r8, #255 ; 0xff\n",
+ " b6: f5b9 7f80 cmp.w r9, #256 ; 0x100\n",
+ " ba: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " be: eb18 0f0c cmn.w r8, ip\n",
+ " c2: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " c6: 45e1 cmp r9, ip\n",
+ " c8: f5b8 5f80 cmp.w r8, #4096 ; 0x1000\n",
+ " cc: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " d0: eb19 0f0c cmn.w r9, ip\n",
+ " d4: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " d8: 45e0 cmp r8, ip\n",
+ " da: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " de: 45e1 cmp r9, ip\n",
+ " e0: f5b8 3f80 cmp.w r8, #65536 ; 0x10000\n",
+ " e4: f1b9 1f01 cmp.w r9, #65537 ; 0x10001\n",
+ " e8: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " ec: eb18 0f0c cmn.w r8, ip\n",
+ " f0: f240 0c03 movw ip, #3\n",
+ " f4: f2c0 0c01 movt ip, #1\n",
+ " f8: 45e1 cmp r9, ip\n",
+ " fa: f1b8 3fff cmp.w r8, #4294967295 ; 0xffffffff\n",
+ " fe: f119 0f07 cmn.w r9, #7\n",
+ " 102: f118 0f08 cmn.w r8, #8\n",
+ " 106: f119 0fff cmn.w r9, #255 ; 0xff\n",
+ " 10a: f518 7f80 cmn.w r8, #256 ; 0x100\n",
+ " 10e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 112: 45e1 cmp r9, ip\n",
+ " 114: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 118: eb18 0f0c cmn.w r8, ip\n",
+ " 11c: f519 5f80 cmn.w r9, #4096 ; 0x1000\n",
+ " 120: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 124: 45e0 cmp r8, ip\n",
+ " 126: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 12a: eb19 0f0c cmn.w r9, ip\n",
+ " 12e: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 132: eb18 0f0c cmn.w r8, ip\n",
+ " 136: f519 3f80 cmn.w r9, #65536 ; 0x10000\n",
+ " 13a: f118 1f01 cmn.w r8, #65537 ; 0x10001\n",
+ " 13e: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 142: 45e1 cmp r9, ip\n",
+ " 144: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 148: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " 14c: 45e0 cmp r8, ip\n",
+ nullptr
+};
+
std::map<std::string, const char* const*> test_results;
void setup_results() {
test_results["SimpleMov"] = SimpleMovResults;
@@ -5421,4 +5519,5 @@ void setup_results() {
test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
test_results["CompareAndBranch"] = CompareAndBranchResults;
test_results["AddConstant"] = AddConstantResults;
+ test_results["CmpConstant"] = CmpConstantResults;
}
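
The expected output above encodes a three-way strategy for comparing a register with an arbitrary 32-bit constant: use cmp when the constant is a Thumb2 modified immediate, cmn when its negation is, and otherwise materialize a constant in IP (via mvn or movw/movt) and compare against the register. A standalone C++ sketch of the selection, with a simplified encodability test (helper names are illustrative, not the assembler's real API):

    #include <cstdint>

    enum class CmpStrategy { kCmpImm, kCmnImm, kMvnCmpReg, kMovwMovtCmpReg };

    // Simplified Thumb2 modified-immediate test: an 8-bit value at some
    // rotation. The real encoder also accepts the replicated byte patterns
    // 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY.
    bool IsModifiedImmediate(uint32_t value) {
      for (int rot = 0; rot < 32; ++rot) {
        uint32_t v = (rot == 0) ? value : ((value << rot) | (value >> (32 - rot)));
        if (v <= 0xffu) {
          return true;
        }
      }
      return false;
    }

    CmpStrategy ChooseCmpStrategy(int32_t value) {
      uint32_t v = static_cast<uint32_t>(value);
      if (IsModifiedImmediate(v)) return CmpStrategy::kCmpImm;       // cmp.w rn, #value
      if (IsModifiedImmediate(0u - v)) return CmpStrategy::kCmnImm;  // cmn.w rn, #-value
      if (IsModifiedImmediate(~v)) return CmpStrategy::kMvnCmpReg;   // mvn ip, #~value; cmp rn, ip
      return CmpStrategy::kMovwMovtCmpReg;                           // movw/movt ip; cmp rn, ip
    }

The mvn/cmn pairs in the listing (e.g. offsets e-12) show a wrinkle the sketch omits: when neither the value nor its complement encodes but a nearby constant does, the generated code can materialize the negation with mvn and finish with a register cmn instead.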
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 0dc307c9ac..ac9c097892 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -1035,6 +1035,22 @@ void MipsAssembler::Movt(Register rd, Register rs, int cc) {
EmitR(0, rs, static_cast<Register>((cc << 2) | 1), rd, 0, 0x01);
}
+void MipsAssembler::TruncLS(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x09);
+}
+
+void MipsAssembler::TruncLD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x09);
+}
+
+void MipsAssembler::TruncWS(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x0D);
+}
+
+void MipsAssembler::TruncWD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x0D);
+}
+
void MipsAssembler::Cvtsw(FRegister fd, FRegister fs) {
EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20);
}
@@ -1051,6 +1067,14 @@ void MipsAssembler::Cvtds(FRegister fd, FRegister fs) {
EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21);
}
+void MipsAssembler::Cvtsl(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x20);
+}
+
+void MipsAssembler::Cvtdl(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x21);
+}
+
void MipsAssembler::Mfc1(Register rt, FRegister fs) {
EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
@@ -1067,6 +1091,24 @@ void MipsAssembler::Mthc1(Register rt, FRegister fs) {
EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
+void MipsAssembler::MoveFromFpuHigh(Register rt, FRegister fs) {
+ if (Is32BitFPU()) {
+ CHECK_EQ(fs % 2, 0) << fs;
+ Mfc1(rt, static_cast<FRegister>(fs + 1));
+ } else {
+ Mfhc1(rt, fs);
+ }
+}
+
+void MipsAssembler::MoveToFpuHigh(Register rt, FRegister fs) {
+ if (Is32BitFPU()) {
+ CHECK_EQ(fs % 2, 0) << fs;
+ Mtc1(rt, static_cast<FRegister>(fs + 1));
+ } else {
+ Mthc1(rt, fs);
+ }
+}
+
void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
EmitI(0x31, rs, static_cast<Register>(ft), imm16);
}
@@ -1213,10 +1255,10 @@ void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
Mtc1(temp, rd);
}
if (high == 0) {
- Mthc1(ZERO, rd);
+ MoveToFpuHigh(ZERO, rd);
} else {
LoadConst32(temp, high);
- Mthc1(temp, rd);
+ MoveToFpuHigh(temp, rd);
}
}
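
All of the new Trunc/Cvt emitters above funnel through EmitFR with the COP1 opcode (0x11), a format field (0x10 = single, 0x11 = double, 0x15 = long) and a function code (0x09 = trunc.l, 0x0D = trunc.w, 0x20 = cvt.s, 0x21 = cvt.d), while MoveFromFpuHigh/MoveToFpuHigh hide the FPU mode: on a 32-bit FPU the high word of a double lives in the odd register of the even/odd pair and is reached with mfc1/mtc1, whereas a 64-bit FPU uses mfhc1/mthc1 directly. A sketch of the field packing, assuming the standard MIPS COP1 R-type layout:

    #include <cstdint>

    // opcode(6) | fmt(5) | ft(5) | fs(5) | fd(5) | funct(6)
    constexpr uint32_t EncodeFR(uint32_t opcode, uint32_t fmt, uint32_t ft,
                                uint32_t fs, uint32_t fd, uint32_t funct) {
      return (opcode << 26) | (fmt << 21) | (ft << 16) |
             (fs << 11) | (fd << 6) | funct;
    }

    // trunc.w.s $f0, $f2: COP1, fmt S, ft unused, fs = 2, fd = 0, funct 0x0D.
    static_assert(EncodeFR(0x11, 0x10, 0, 2, 0, 0x0D) == 0x4600100du,
                  "trunc.w.s $f0, $f2");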
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 066e7b0014..01c6490f88 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -265,15 +265,23 @@ class MipsAssembler FINAL : public Assembler {
void Movf(Register rd, Register rs, int cc); // R2
void Movt(Register rd, Register rs, int cc); // R2
+ void TruncLS(FRegister fd, FRegister fs); // R2+, FR=1
+ void TruncLD(FRegister fd, FRegister fs); // R2+, FR=1
+ void TruncWS(FRegister fd, FRegister fs);
+ void TruncWD(FRegister fd, FRegister fs);
void Cvtsw(FRegister fd, FRegister fs);
void Cvtdw(FRegister fd, FRegister fs);
void Cvtsd(FRegister fd, FRegister fs);
void Cvtds(FRegister fd, FRegister fs);
+ void Cvtsl(FRegister fd, FRegister fs); // R2+, FR=1
+ void Cvtdl(FRegister fd, FRegister fs); // R2+, FR=1
void Mfc1(Register rt, FRegister fs);
void Mtc1(Register rt, FRegister fs);
void Mfhc1(Register rt, FRegister fs);
void Mthc1(Register rt, FRegister fs);
+ void MoveFromFpuHigh(Register rt, FRegister fs);
+ void MoveToFpuHigh(Register rt, FRegister fs);
void Lwc1(FRegister ft, Register rs, uint16_t imm16);
void Ldc1(FRegister ft, Register rs, uint16_t imm16);
void Swc1(FRegister ft, Register rs, uint16_t imm16);
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 4361843c54..5fc3deebd3 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -599,6 +599,14 @@ TEST_F(AssemblerMIPSTest, CvtDW) {
DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "CvtDW");
}
+TEST_F(AssemblerMIPSTest, CvtSL) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "CvtSL");
+}
+
+TEST_F(AssemblerMIPSTest, CvtDL) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "CvtDL");
+}
+
TEST_F(AssemblerMIPSTest, CvtSD) {
DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "CvtSD");
}
@@ -607,6 +615,22 @@ TEST_F(AssemblerMIPSTest, CvtDS) {
DriverStr(RepeatFF(&mips::MipsAssembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "CvtDS");
}
+TEST_F(AssemblerMIPSTest, TruncWS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::TruncWS, "trunc.w.s ${reg1}, ${reg2}"), "TruncWS");
+}
+
+TEST_F(AssemblerMIPSTest, TruncWD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::TruncWD, "trunc.w.d ${reg1}, ${reg2}"), "TruncWD");
+}
+
+TEST_F(AssemblerMIPSTest, TruncLS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::TruncLS, "trunc.l.s ${reg1}, ${reg2}"), "TruncLS");
+}
+
+TEST_F(AssemblerMIPSTest, TruncLD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::TruncLD, "trunc.l.d ${reg1}, ${reg2}"), "TruncLD");
+}
+
TEST_F(AssemblerMIPSTest, Mfc1) {
DriverStr(RepeatRF(&mips::MipsAssembler::Mfc1, "mfc1 ${reg1}, ${reg2}"), "Mfc1");
}
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index cfd8421e93..f9ff2df8bb 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -771,6 +771,22 @@ void Mips64Assembler::RoundWD(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xc);
}
+void Mips64Assembler::TruncLS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x9);
+}
+
+void Mips64Assembler::TruncLD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x9);
+}
+
+void Mips64Assembler::TruncWS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xd);
+}
+
+void Mips64Assembler::TruncWD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xd);
+}
+
void Mips64Assembler::CeilLS(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xa);
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 883f013f87..3262640ce7 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -250,6 +250,10 @@ class Mips64Assembler FINAL : public Assembler {
void RoundLD(FpuRegister fd, FpuRegister fs);
void RoundWS(FpuRegister fd, FpuRegister fs);
void RoundWD(FpuRegister fd, FpuRegister fs);
+ void TruncLS(FpuRegister fd, FpuRegister fs);
+ void TruncLD(FpuRegister fd, FpuRegister fs);
+ void TruncWS(FpuRegister fd, FpuRegister fs);
+ void TruncWD(FpuRegister fd, FpuRegister fs);
void CeilLS(FpuRegister fd, FpuRegister fs);
void CeilLD(FpuRegister fd, FpuRegister fs);
void CeilWS(FpuRegister fd, FpuRegister fs);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index bac4375b35..7d79be2731 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -527,6 +527,22 @@ TEST_F(AssemblerMIPS64Test, CvtSW) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "cvt.s.w");
}
+TEST_F(AssemblerMIPS64Test, TruncWS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncWS, "trunc.w.s ${reg1}, ${reg2}"), "trunc.w.s");
+}
+
+TEST_F(AssemblerMIPS64Test, TruncWD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncWD, "trunc.w.d ${reg1}, ${reg2}"), "trunc.w.d");
+}
+
+TEST_F(AssemblerMIPS64Test, TruncLS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncLS, "trunc.l.s ${reg1}, ${reg2}"), "trunc.l.s");
+}
+
+TEST_F(AssemblerMIPS64Test, TruncLD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncLD, "trunc.l.d ${reg1}, ${reg2}"), "trunc.l.d");
+}
+
////////////////
// CALL / JMP //
////////////////
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 42ed8810f8..244a5fedbe 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -18,6 +18,7 @@
#include <algorithm>
#include <numeric>
+#include <sys/mman.h>
#include "base/logging.h"
#include "base/macros.h"
@@ -44,23 +45,17 @@ static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
}
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void RemoveChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- typename FreeBySizeSet::const_iterator free_by_size_pos) {
+void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
auto free_by_start_pos = free_by_size_pos->second;
- free_by_size->erase(free_by_size_pos);
- free_by_start->erase(free_by_start_pos);
+ free_by_size_.erase(free_by_size_pos);
+ free_by_start_.erase(free_by_start_pos);
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void InsertChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- const SpaceChunk& chunk) {
+inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
DCHECK_NE(chunk.size, 0u);
- auto insert_result = free_by_start->insert(chunk);
+ auto insert_result = free_by_start_.insert(chunk);
DCHECK(insert_result.second);
- free_by_size->emplace(chunk.size, insert_result.first);
+ free_by_size_.emplace(chunk.size, insert_result.first);
}
SwapSpace::SwapSpace(int fd, size_t initial_size)
@@ -69,10 +64,18 @@ SwapSpace::SwapSpace(int fd, size_t initial_size)
lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
// Assume that the file is unlinked.
- InsertChunk(&free_by_start_, &free_by_size_, NewFileChunk(initial_size));
+ InsertChunk(NewFileChunk(initial_size));
}
SwapSpace::~SwapSpace() {
+ // Unmap all mmapped chunks. Nothing should be allocated anymore at
+ // this point, so there should be only full size chunks in free_by_start_.
+ for (const SpaceChunk& chunk : free_by_start_) {
+ if (munmap(chunk.ptr, chunk.size) != 0) {
+ PLOG(ERROR) << "Failed to unmap swap space chunk at "
+ << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
+ }
+ }
// All arenas are backed by the same file. Just close the descriptor.
close(fd_);
}
@@ -113,7 +116,7 @@ void* SwapSpace::Alloc(size_t size) {
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
old_chunk = *it->second;
- RemoveChunk(&free_by_start_, &free_by_size_, it);
+ RemoveChunk(it);
} else {
// Not a big enough free chunk, need to increase file size.
old_chunk = NewFileChunk(size);
@@ -124,13 +127,13 @@ void* SwapSpace::Alloc(size_t size) {
if (old_chunk.size != size) {
// Insert the remainder.
SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
- InsertChunk(&free_by_start_, &free_by_size_, new_chunk);
+ InsertChunk(new_chunk);
}
return ret;
}
-SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
+SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));
int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
@@ -159,7 +162,7 @@ SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
}
// TODO: Full coalescing.
-void SwapSpace::Free(void* ptrV, size_t size) {
+void SwapSpace::Free(void* ptr, size_t size) {
MutexLock lock(Thread::Current(), lock_);
size = RoundUp(size, 8U);
@@ -168,7 +171,7 @@ void SwapSpace::Free(void* ptrV, size_t size) {
free_before = CollectFree(free_by_start_, free_by_size_);
}
- SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptrV), size };
+ SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
auto it = free_by_start_.lower_bound(chunk);
if (it != free_by_start_.begin()) {
auto prev = it;
@@ -180,7 +183,7 @@ void SwapSpace::Free(void* ptrV, size_t size) {
chunk.ptr -= prev->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "prev" is invalidated but "it" remains valid.
}
}
@@ -191,11 +194,11 @@ void SwapSpace::Free(void* ptrV, size_t size) {
chunk.size += it->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "it" is invalidated but we don't need it anymore.
}
}
- InsertChunk(&free_by_start_, &free_by_size_, chunk);
+ InsertChunk(chunk);
if (kCheckFreeMaps) {
size_t free_after = CollectFree(free_by_start_, free_by_size_);
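
The refactoring above turns the free-list helpers into members without changing the design: the same chunks live in two indexes, free_by_start_ (ordered by address, used for coalescing in Free()) and free_by_size_ (size paired with the free_by_start_ iterator, used for best-fit lookup in Alloc()). A minimal standalone sketch of the allocate-and-split path, with illustrative types rather than the real class:

    #include <cstddef>
    #include <cstdint>
    #include <set>
    #include <utility>

    struct Chunk { uint8_t* ptr; std::size_t size; };
    struct ByPtr {
      bool operator()(const Chunk& a, const Chunk& b) const { return a.ptr < b.ptr; }
    };
    using ByStart = std::set<Chunk, ByPtr>;
    using SizeEntry = std::pair<std::size_t, ByStart::const_iterator>;
    struct BySize {
      bool operator()(const SizeEntry& a, const SizeEntry& b) const {
        if (a.first != b.first) return a.first < b.first;
        return a.second->ptr < b.second->ptr;  // tie-break on chunk start
      }
    };
    using BySizeSet = std::set<SizeEntry, BySize>;

    // Best fit: take the smallest chunk that can hold the request, drop it
    // from both indexes, and return the unused tail as a new free chunk.
    uint8_t* AllocFrom(ByStart* by_start, BySizeSet* by_size, std::size_t size) {
      if (by_start->empty()) return nullptr;  // the real code grows the file here
      auto it = by_size->lower_bound(SizeEntry{size, by_start->begin()});
      if (it == by_size->end()) return nullptr;
      Chunk chunk = *it->second;
      ByStart::const_iterator start_pos = it->second;
      by_size->erase(it);  // the size entry holds the iterator: erase it first
      by_start->erase(start_pos);
      if (chunk.size != size) {
        Chunk rest{chunk.ptr + size, chunk.size - size};
        by_size->emplace(rest.size, by_start->insert(rest).first);
      }
      return chunk.ptr;
    }

The erase order mirrors the member RemoveChunk: the size entry must be consumed before the address entry it points at goes away.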
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 9127b6b096..b659f1d3c7 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -19,42 +19,17 @@
#include <cstdlib>
#include <list>
+#include <vector>
#include <set>
#include <stdint.h>
#include <stddef.h>
-#include "base/debug_stack.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "mem_map.h"
namespace art {
-// Chunk of space.
-struct SpaceChunk {
- uint8_t* ptr;
- size_t size;
-
- uintptr_t Start() const {
- return reinterpret_cast<uintptr_t>(ptr);
- }
- uintptr_t End() const {
- return reinterpret_cast<uintptr_t>(ptr) + size;
- }
-};
-
-inline bool operator==(const SpaceChunk& lhs, const SpaceChunk& rhs) {
- return (lhs.size == rhs.size) && (lhs.ptr == rhs.ptr);
-}
-
-class SortChunkByPtr {
- public:
- bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
- return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
- }
-};
-
// An arena pool that creates arenas backed by an mmaped file.
class SwapSpace {
public:
@@ -68,17 +43,27 @@ class SwapSpace {
}
private:
- SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+ // Chunk of space.
+ struct SpaceChunk {
+ uint8_t* ptr;
+ size_t size;
- int fd_;
- size_t size_;
- std::list<SpaceChunk> maps_;
+ uintptr_t Start() const {
+ return reinterpret_cast<uintptr_t>(ptr);
+ }
+ uintptr_t End() const {
+ return reinterpret_cast<uintptr_t>(ptr) + size;
+ }
+ };
- // NOTE: Boost.Bimap would be useful for the two following members.
+ class SortChunkByPtr {
+ public:
+ bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
+ return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
+ }
+ };
- // Map start of a free chunk to its size.
typedef std::set<SpaceChunk, SortChunkByPtr> FreeByStartSet;
- FreeByStartSet free_by_start_ GUARDED_BY(lock_);
// Map size to an iterator to free_by_start_'s entry.
typedef std::pair<size_t, FreeByStartSet::const_iterator> FreeBySizeEntry;
@@ -92,6 +77,21 @@ class SwapSpace {
}
};
typedef std::set<FreeBySizeEntry, FreeBySizeComparator> FreeBySizeSet;
+
+ SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+
+ void RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) REQUIRES(lock_);
+ void InsertChunk(const SpaceChunk& chunk) REQUIRES(lock_);
+
+ int fd_;
+ size_t size_;
+ std::list<SpaceChunk> maps_;
+
+ // NOTE: Boost.Bimap would be useful for the two following members.
+
+ // Map start of a free chunk to its size.
+ FreeByStartSet free_by_start_ GUARDED_BY(lock_);
+ // Free chunks ordered by size.
FreeBySizeSet free_by_size_ GUARDED_BY(lock_);
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -126,6 +126,9 @@ class SwapAllocator<void> {
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
@@ -201,9 +204,22 @@ class SwapAllocator {
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
+inline bool operator==(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return lhs.swap_space_ == rhs.swap_space_;
+}
+
+template <typename T>
+inline bool operator!=(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename T>
using SwapVector = std::vector<T, SwapAllocator<T>>;
template <typename T, typename Comparator>
using SwapSet = std::set<T, Comparator, SwapAllocator<T>>;
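
The added operator==/operator!= complete SwapAllocator's allocator contract: standard containers require allocators to be equality-comparable, and two instances should compare equal exactly when memory obtained from one can be released through the other, which here means sharing the same backing SwapSpace. The shape of the pattern, using a stand-in pool type rather than the real class:

    #include <cstddef>
    #include <new>
    #include <vector>

    template <typename T>
    struct PoolAllocator {  // stand-in for SwapAllocator
      using value_type = T;
      void* pool;  // stand-in for the swap_space_ pointer
      explicit PoolAllocator(void* p) : pool(p) {}
      template <typename U>
      PoolAllocator(const PoolAllocator<U>& other) : pool(other.pool) {}
      T* allocate(std::size_t n) {
        return static_cast<T*>(::operator new(n * sizeof(T)));
      }
      void deallocate(T* p, std::size_t) { ::operator delete(p); }
    };

    // Equal exactly when both use the same backing pool, so memory from
    // one may be released through the other.
    template <typename T>
    bool operator==(const PoolAllocator<T>& a, const PoolAllocator<T>& b) {
      return a.pool == b.pool;
    }
    template <typename T>
    bool operator!=(const PoolAllocator<T>& a, const PoolAllocator<T>& b) {
      return !(a == b);
    }

    int main() {
      int pool_tag = 0;
      PoolAllocator<int> alloc(&pool_tag);
      std::vector<int, PoolAllocator<int>> v(alloc);
      v.push_back(42);  // the container can now be moved/swapped safely
    }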
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 50480d9043..808643a6a6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -63,6 +63,7 @@
#include "gc/space/space-inl.h"
#include "image_writer.h"
#include "interpreter/unstarted_runtime.h"
+#include "jit/offline_profiling_info.h"
#include "leb128.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -70,6 +71,7 @@
#include "mirror/object_array-inl.h"
#include "oat_writer.h"
#include "os.h"
+#include "profile_assistant.h"
#include "runtime.h"
#include "runtime_options.h"
#include "ScopedLocalRef.h"
@@ -311,13 +313,12 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" -g");
UsageError(" --generate-debug-info: Generate debug information for native debugging,");
UsageError(" such as stack unwinding information, ELF symbols and DWARF sections.");
- UsageError(" This generates all the available information. Unneeded parts can be");
- UsageError(" stripped using standard command line tools such as strip or objcopy.");
- UsageError(" (enabled by default in debug builds, disabled by default otherwise)");
+ UsageError(" If used without --native-debuggable, it will be best-effort only.");
+ UsageError(" This option does not affect the generated code. (disabled by default)");
UsageError("");
UsageError(" --no-generate-debug-info: Do not generate debug information for native debugging.");
UsageError("");
- UsageError(" --debuggable: Produce code debuggable with Java debugger. Implies -g.");
+ UsageError(" --debuggable: Produce code debuggable with Java debugger.");
UsageError("");
UsageError(" --native-debuggable: Produce code debuggable with native debugger (like LLDB).");
UsageError(" Implies --debuggable.");
@@ -328,6 +329,16 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" Example: --runtime-arg -Xms256m");
UsageError("");
UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation.");
+ UsageError(" Can be specified multiple time, in which case the data from the different");
+ UsageError(" profiles will be aggregated.");
+ UsageError("");
+ UsageError(" --reference-profile-file=<filename>: specify a reference profile file to use when");
+ UsageError(" compiling. The data in this file will be compared with the data in the");
+ UsageError(" associated --profile-file and the compilation will proceed only if there is");
+ UsageError(" a significant difference (--reference-profile-file is paired with");
+ UsageError(" --profile-file in the natural order). If the compilation was attempted then");
+ UsageError(" --profile-file will be merged into --reference-profile-file. Valid only when");
+ UsageError(" specified together with --profile-file.");
UsageError("");
UsageError(" --print-pass-names: print a list of pass names");
UsageError("");
@@ -767,6 +778,13 @@ class Dex2Oat FINAL {
}
}
+ if (!profile_files_.empty()) {
+ if (!reference_profile_files_.empty() &&
+ (reference_profile_files_.size() != profile_files_.size())) {
+ Usage("If specified, --reference-profile-file should match the number of --profile-file.");
+ }
+ }
+
if (!parser_options->oat_symbols.empty()) {
oat_unstripped_ = std::move(parser_options->oat_symbols);
}
@@ -1057,8 +1075,10 @@ class Dex2Oat FINAL {
} else if (option.starts_with("--compiler-backend=")) {
ParseCompilerBackend(option, parser_options.get());
} else if (option.starts_with("--profile-file=")) {
- profile_file_ = option.substr(strlen("--profile-file=")).data();
- VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+ profile_files_.push_back(option.substr(strlen("--profile-file=")).ToString());
+ } else if (option.starts_with("--reference-profile-file=")) {
+ reference_profile_files_.push_back(
+ option.substr(strlen("--reference-profile-file=")).ToString());
} else if (option == "--no-profile-file") {
// No profile
} else if (option == "--host") {
@@ -1479,9 +1499,8 @@ class Dex2Oat FINAL {
dump_cfg_append_,
compiler_phases_timings_.get(),
swap_fd_,
- profile_file_,
- &dex_file_oat_filename_map_));
-
+ &dex_file_oat_filename_map_,
+ profile_compilation_info_.get()));
driver_->SetDexFilesForOatFile(dex_files_);
driver_->CompileAll(class_loader, dex_files_, timings_);
}
@@ -1569,7 +1588,6 @@ class Dex2Oat FINAL {
std::vector<gc::space::ImageSpace*> image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
for (gc::space::ImageSpace* image_space : image_spaces) {
- // TODO: IS THIS IN ORDER? JUST TAKE THE LAST ONE?
image_base_ = std::max(image_base_, RoundUp(
reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
kPageSize));
@@ -1790,6 +1808,26 @@ class Dex2Oat FINAL {
return is_host_;
}
+ bool UseProfileGuidedCompilation() const {
+ return !profile_files_.empty();
+ }
+
+ bool ProcessProfiles() {
+ DCHECK(UseProfileGuidedCompilation());
+ ProfileCompilationInfo* info = nullptr;
+ if (ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_files_, &info)) {
+ profile_compilation_info_.reset(info);
+ return true;
+ }
+ return false;
+ }
+
+ bool ShouldCompileBasedOnProfiles() const {
+ DCHECK(UseProfileGuidedCompilation());
+ // If we are given profiles, compile only if we have new information.
+ return profile_compilation_info_ != nullptr;
+ }
+
private:
template <typename T>
static std::vector<T*> MakeNonOwningPointerVector(const std::vector<std::unique_ptr<T>>& src) {
@@ -2263,7 +2301,9 @@ class Dex2Oat FINAL {
int swap_fd_;
std::string app_image_file_name_;
int app_image_fd_;
- std::string profile_file_; // Profile file to use
+ std::vector<std::string> profile_files_;
+ std::vector<std::string> reference_profile_files_;
+ std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_;
TimingLogger* timings_;
std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
@@ -2380,6 +2420,20 @@ static int dex2oat(int argc, char** argv) {
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
dex2oat.ParseArgs(argc, argv);
+ // Process profile information and assess if we need to do a profile guided compilation.
+ // This operation involves I/O.
+ if (dex2oat.UseProfileGuidedCompilation()) {
+ if (dex2oat.ProcessProfiles()) {
+ if (!dex2oat.ShouldCompileBasedOnProfiles()) {
+ LOG(INFO) << "Skipped compilation because of insignificant profile delta";
+ return EXIT_SUCCESS;
+ }
+ } else {
+ LOG(WARNING) << "Failed to process profile files";
+ return EXIT_FAILURE;
+ }
+ }
+
// Check early that the result of compilation can be written
if (!dex2oat.OpenFile()) {
return EXIT_FAILURE;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index ee7b21ced7..f9226878a9 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -56,7 +56,7 @@ static const MipsInstruction gMipsInstructions[] = {
// R-type instructions.
{ kRTypeMask, 0, "sll", "DTA", },
// 0, 1, movci
- { kRTypeMask, 2, "srl", "DTA", },
+ { kRTypeMask | (0x1f << 21), 2, "srl", "DTA", },
{ kRTypeMask, 3, "sra", "DTA", },
{ kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
{ kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3037652c4c..d836532ed2 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -540,7 +540,7 @@ void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
// Note that we require that ReadFromMemory does not make an internal copy of the elements.
// This also relies on visit roots not doing any verification which could fail after we update
// the roots to be the image addresses.
- temp_table.ReadFromMemory(image_->Begin() + section.Offset());
+ temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
FixupRootVisitor visitor(this);
temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 14e5ec9cfe..04645d16d2 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -250,6 +250,8 @@ LIBART_TARGET_SRC_FILES := \
thread_android.cc
LIBART_TARGET_SRC_FILES_arm := \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_arm.S \
arch/arm/context_arm.cc.arm \
arch/arm/entrypoints_init_arm.cc \
arch/arm/instruction_set_features_assembly_tests.S \
@@ -261,6 +263,7 @@ LIBART_TARGET_SRC_FILES_arm := \
arch/arm/fault_handler_arm.cc
LIBART_TARGET_SRC_FILES_arm64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/arm64/context_arm64.cc \
arch/arm64/entrypoints_init_arm64.cc \
arch/arm64/jni_entrypoints_arm64.S \
@@ -271,6 +274,7 @@ LIBART_TARGET_SRC_FILES_arm64 := \
arch/arm64/fault_handler_arm64.cc
LIBART_SRC_FILES_x86 := \
+ interpreter/mterp/mterp_stub.cc \
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
@@ -285,6 +289,7 @@ LIBART_TARGET_SRC_FILES_x86 := \
# Note that the fault_handler_x86.cc is not a mistake. This file is
# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
@@ -298,6 +303,7 @@ LIBART_TARGET_SRC_FILES_x86_64 := \
$(LIBART_SRC_FILES_x86_64) \
LIBART_TARGET_SRC_FILES_mips := \
+ interpreter/mterp/mterp_stub.cc \
arch/mips/context_mips.cc \
arch/mips/entrypoints_init_mips.cc \
arch/mips/jni_entrypoints_mips.S \
@@ -307,6 +313,7 @@ LIBART_TARGET_SRC_FILES_mips := \
arch/mips/fault_handler_mips.cc
LIBART_TARGET_SRC_FILES_mips64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/mips64/context_mips64.cc \
arch/mips64/entrypoints_init_mips64.cc \
arch/mips64/jni_entrypoints_mips64.S \
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 0691f2a620..699ab3e65a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1312,7 +1312,114 @@ END \name
.endm
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+
+ # Fast path rosalloc allocation
+ # a0: type_idx
+ # a1: ArtMethod*
+ # s1: Thread::Current
+ # -----------------------------
+ # t0: class
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # t4: thread stack bottom offset
+ # v0: free list head
+ #
+ # t5, t6 : temps
+
+ lw $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1) # Load dex cache resolved types
+ # array.
+
+ sll $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
+ addu $t5, $t0, $t5 # Compute the index.
+ lw $t0, 0($t5) # Load class (t0).
+ beqz $t0, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ li $t6, MIRROR_CLASS_STATUS_INITIALIZED
+ lw $t5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
+ bne $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ # Add a fake dependence from the following access flag and size loads to the status load. This
+ # is to prevent those loads from being reordered above the status load and reading wrong values.
+ xor $t5, $t5, $t5
+ addu $t0, $t0, $t5
+
+ lw $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags have
+ li $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
+ and $t6, $t5, $t6
+ bnez $t6, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
+ lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
+ bgeu $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ lw $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
+ li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation.
+ bgtu $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ # Compute the rosalloc bracket index from the size. Align the size up to the rosalloc bracket
+ # quantum size, divide by the quantum size and subtract 1.
+
+ addiu $t1, $t1, -1 # Decrease obj size and shift right
+ srl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # by quantum.
+
+ sll $t2, $t1, POINTER_SIZE_SHIFT
+ addu $t2, $t2, $s1
+ lw $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+
+ lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqz $v0, .Lart_quick_alloc_object_rosalloc_slow_path
+ nop
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+
+ lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
+ # asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $t0
+ sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+
+ sw $v0, 0($t3)
+ addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+
+ lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $t5, $t5, -1
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ nop
+
+ .Lart_quick_alloc_object_rosalloc_slow_path:
+
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ jal artAllocObjectFromCodeRosAlloc
+ move $a2, $s1 # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+END art_quick_alloc_object_rosalloc
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
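
The two-instruction bracket-index computation in the fast path above works because the rosalloc bracket quantum is a power of two: rounding the size up to the quantum, dividing by the quantum and subtracting one collapses to a decrement and a shift. The equivalence in C form (the quantum shift value is illustrative):

    #include <cstddef>

    constexpr std::size_t kQuantumShift = 4;  // illustrative: 16-byte quantum
    constexpr std::size_t kQuantum = std::size_t{1} << kQuantumShift;

    constexpr std::size_t BracketIndex(std::size_t size) {  // spelled-out form
      return ((size + kQuantum - 1) / kQuantum) - 1;
    }
    constexpr std::size_t BracketIndexFast(std::size_t size) {  // addiu -1; srl
      return (size - 1) >> kQuantumShift;
    }

    static_assert(BracketIndex(1) == BracketIndexFast(1), "one byte");
    static_assert(BracketIndex(16) == BracketIndexFast(16), "exact quantum");
    static_assert(BracketIndex(17) == BracketIndexFast(17), "rounds up");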
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 2cb2212210..7170f73e10 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -378,8 +378,8 @@ class StubTest : public CommonRuntimeTest {
"memory"); // clobber.
#elif defined(__mips__) && defined(__LP64__)
__asm__ __volatile__ (
- // Spill a0-a7 which we say we don't clobber. May contain args.
- "daddiu $sp, $sp, -64\n\t"
+ // Spill a0-a7 and t0-t3 which we say we don't clobber. May contain args.
+ "daddiu $sp, $sp, -96\n\t"
"sd $a0, 0($sp)\n\t"
"sd $a1, 8($sp)\n\t"
"sd $a2, 16($sp)\n\t"
@@ -388,6 +388,10 @@ class StubTest : public CommonRuntimeTest {
"sd $a5, 40($sp)\n\t"
"sd $a6, 48($sp)\n\t"
"sd $a7, 56($sp)\n\t"
+ "sd $t0, 64($sp)\n\t"
+ "sd $t1, 72($sp)\n\t"
+ "sd $t2, 80($sp)\n\t"
+ "sd $t3, 88($sp)\n\t"
"daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
"sd %[referrer], 0($sp)\n\t"
@@ -423,13 +427,17 @@ class StubTest : public CommonRuntimeTest {
"ld $a5, 40($sp)\n\t"
"ld $a6, 48($sp)\n\t"
"ld $a7, 56($sp)\n\t"
- "daddiu $sp, $sp, 64\n\t"
+ "ld $t0, 64($sp)\n\t"
+ "ld $t1, 72($sp)\n\t"
+ "ld $t2, 80($sp)\n\t"
+ "ld $t3, 88($sp)\n\t"
+ "daddiu $sp, $sp, 96\n\t"
"move %[result], $v0\n\t" // Store the call result.
: [result] "=r" (result)
: [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
[referrer] "r"(referrer), [hidden] "r"(hidden)
- : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9", "k0", "k1", "fp", "ra",
"$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
"$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index cf548ada33..a5f5c49068 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -225,8 +225,7 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
- // Interface methods cannot be invoked with invoke-super.
- return IsConstructor() || IsStatic() || GetDeclaringClass()->IsInterface();
+ return IsConstructor() || IsStatic();
case kInterface: {
mirror::Class* methods_class = GetDeclaringClass();
return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass());
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 8efad8876a..0be2fa20ac 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -267,7 +267,9 @@ class ArtMethod FINAL {
bool HasSameNameAndSignature(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method that this method overrides.
- ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindOverriddenMethod(size_t pointer_size)
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
// return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 2b4826eea0..31610a3d71 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -133,8 +133,20 @@ ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_current_ibase.
+#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
+ art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_default_ibase.
+#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
+ art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
+#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
+ art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 6 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
@@ -146,6 +158,40 @@ ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offsets within ShadowFrame.
+#define SHADOWFRAME_LINK_OFFSET 0
+ADD_TEST_EQ(SHADOWFRAME_LINK_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::LinkOffset()))
+#define SHADOWFRAME_METHOD_OFFSET (SHADOWFRAME_LINK_OFFSET + 1 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_METHOD_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::MethodOffset()))
+#define SHADOWFRAME_RESULT_REGISTER_OFFSET (SHADOWFRAME_LINK_OFFSET + 2 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_RESULT_REGISTER_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::ResultRegisterOffset()))
+#define SHADOWFRAME_DEX_PC_PTR_OFFSET (SHADOWFRAME_LINK_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_DEX_PC_PTR_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::DexPCPtrOffset()))
+#define SHADOWFRAME_CODE_ITEM_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_CODE_ITEM_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::CodeItemOffset()))
+#define SHADOWFRAME_LOCK_COUNT_DATA_OFFSET (SHADOWFRAME_LINK_OFFSET + 5 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::LockCountDataOffset()))
+#define SHADOWFRAME_NUMBER_OF_VREGS_OFFSET (SHADOWFRAME_LINK_OFFSET + 6 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::NumberOfVRegsOffset()))
+#define SHADOWFRAME_DEX_PC_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 4)
+ADD_TEST_EQ(SHADOWFRAME_DEX_PC_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::DexPCOffset()))
+#define SHADOWFRAME_VREGS_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 8)
+ADD_TEST_EQ(SHADOWFRAME_VREGS_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::VRegsOffset()))
+
+// Offsets within CodeItem
+#define CODEITEM_INSNS_OFFSET 16
+ADD_TEST_EQ(CODEITEM_INSNS_OFFSET,
+ static_cast<int32_t>(OFFSETOF_MEMBER(art::DexFile::CodeItem, insns_)))
+
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
ADD_TEST_EQ(MIRROR_OBJECT_CLASS_OFFSET, art::mirror::Object::ClassOffset().Int32Value())
@@ -188,6 +234,26 @@ ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32
ADD_TEST_EQ(MIRROR_CHAR_ARRAY_DATA_OFFSET,
art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
+#define MIRROR_BOOLEAN_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
+
+#define MIRROR_BYTE_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_BYTE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
+
+#define MIRROR_SHORT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_SHORT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
+
+#define MIRROR_INT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_INT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
+
+#define MIRROR_WIDE_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_WIDE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
+
#define MIRROR_OBJECT_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
art::mirror::Array::DataOffset(
@@ -299,6 +365,12 @@ ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET,
// Assert this so that we can avoid zeroing the next field by installing the class pointer.
ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
+#define THREAD_SUSPEND_REQUEST 1
+ADD_TEST_EQ(THREAD_SUSPEND_REQUEST, static_cast<int32_t>(art::kSuspendRequest))
+
+#define THREAD_CHECKPOINT_REQUEST 2
+ADD_TEST_EQ(THREAD_CHECKPOINT_REQUEST, static_cast<int32_t>(art::kCheckpointRequest))
+
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
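
The pattern throughout this header is to define a literal constant that assembly code can use, then immediately assert it against the C++ source of truth; when the header is compiled as C++, ADD_TEST_EQ expands into checks gathered in a verification function. The same idea in a standalone, static form (the struct and offset are illustrative stand-ins):

    #include <cstddef>
    #include <cstdint>

    struct Example {   // illustrative stand-in for a runtime type
      int64_t link;    // 8 bytes regardless of pointer size
      int32_t my_field;
    };

    #define EXAMPLE_MY_FIELD_OFFSET 8  // what assembly code would hard-code

    static_assert(EXAMPLE_MY_FIELD_OFFSET == offsetof(Example, my_field),
                  "assembly offset out of sync with the C++ layout");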
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 969f5b953f..cea704627c 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -158,7 +158,7 @@ using TrackingAllocator = typename TypeStaticIf<kEnableTrackingAllocator,
template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap = std::multimap<
- Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
+ Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
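
The const in std::pair<const Key, T> matters because that is exactly std::multimap's value_type, and since C++11 the allocator supplied to a container must name the container's value_type; stricter standard libraries reject the non-const form. A standalone check:

    #include <functional>
    #include <map>
    #include <memory>
    #include <type_traits>
    #include <utility>

    static_assert(std::is_same<std::multimap<int, long>::value_type,
                               std::pair<const int, long>>::value,
                  "map elements carry a const key");

    // So an allocator-parameterized alias must name pair<const Key, T>:
    template <class K, class T, class Compare = std::less<K>>
    using TrackedMultiMap =
        std::multimap<K, T, Compare, std::allocator<std::pair<const K, T>>>;

    TrackedMultiMap<int, long> instance;  // accepted by strict libraries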
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 70bd398415..82a5f9611c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -855,6 +855,18 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
}
+ if (self != nullptr) {
+ JNIEnvExt* const env = self->GetJniEnv();
+ if (UNLIKELY(env != nullptr && env->runtime_deleted)) {
+ CHECK(self->IsDaemon());
+ // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
+ // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
+ // --host and --gdb.
+ // After we wake up, the runtime may have been shutdown, which means that this condition may
+ // have been deleted. It is not safe to retry the wait.
+ SleepForever();
+ }
+ }
guard_.ExclusiveLock(self);
CHECK_GE(num_waiters_, 0);
num_waiters_--;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 263f50de51..f674a6f3c8 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -75,6 +75,7 @@ enum LockLevel {
kReferenceQueueWeakReferencesLock,
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
+ kJitDebugInterfaceLock,
kJitCodeCacheLock,
kAllocSpaceLock,
kBumpPointerSpaceBlockLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d995f28c90..3d0629554b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -46,6 +46,7 @@
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "experimental_flags.h"
#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
@@ -736,7 +737,7 @@ void ClassLinker::RunRootClinits() {
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
- std::vector<gc::space::ImageSpace*>& spaces)
+ const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
@@ -760,7 +761,7 @@ static void SanityCheckArtMethod(ArtMethod* m,
static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
mirror::Class* expected_class,
size_t pointer_size,
- std::vector<gc::space::ImageSpace*>& spaces)
+ const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
@@ -778,7 +779,7 @@ static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
static void SanityCheckArtMethodPointerArray(ArtMethod** arr,
size_t size,
size_t pointer_size,
- std::vector<gc::space::ImageSpace*>& spaces)
+ const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
if (arr != nullptr) {
@@ -1113,6 +1114,7 @@ bool ClassLinker::InitFromImage(std::string* error_msg) {
mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+ size_t class_tables_added = 0;
for (gc::space::ImageSpace* space : spaces) {
const ImageHeader& header = space->GetImageHeader();
const ImageSection& section = header.GetImageSection(ImageHeader::kSectionClassTable);
@@ -1120,9 +1122,17 @@ bool ClassLinker::InitFromImage(std::string* error_msg) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
class_table->ReadFromMemory(space->Begin() + section.Offset());
- dex_cache_boot_image_class_lookup_required_ = false;
+ ++class_tables_added;
}
}
+ if (class_tables_added != 0) {
+ // Either all of the image spaces have an empty class section or none do. In the case where
+ // an image space has no classes, it will still have a non-empty class section that contains
+ // metadata.
+ CHECK_EQ(spaces.size(), class_tables_added)
+ << "Expected non-empty class section for each image space.";
+ dex_cache_boot_image_class_lookup_required_ = false;
+ }
FinishInit(self);
@@ -4816,7 +4826,6 @@ bool ClassLinker::LinkVirtualMethods(
return false;
}
bool has_defaults = false;
- // TODO May need to replace this with real VTable for invoke_super
// Assign each method an IMT index and set the default flag.
for (size_t i = 0; i < num_virtual_methods; ++i) {
ArtMethod* m = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
@@ -5454,6 +5463,53 @@ static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size
}
}
+static void FillImtFromSuperClass(Handle<mirror::Class> klass,
+ Handle<mirror::IfTable> iftable,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ ArtMethod** out_imt,
+ size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(klass->HasSuperClass());
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ out_imt[i] = super_class->GetEmbeddedImTableEntry(i, pointer_size);
+ }
+ } else {
+ // No imt in the super class, need to reconstruct from the iftable.
+ mirror::IfTable* if_table = super_class->GetIfTable();
+ const size_t length = super_class->GetIfTableCount();
+ for (size_t i = 0; i < length; ++i) {
+ mirror::Class* interface = iftable->GetInterface(i);
+ const size_t num_virtuals = interface->NumDeclaredVirtualMethods();
+ const size_t method_array_count = if_table->GetMethodArrayCount(i);
+ DCHECK_EQ(num_virtuals, method_array_count);
+ if (method_array_count == 0) {
+ continue;
+ }
+ auto* method_array = if_table->GetMethodArray(i);
+ for (size_t j = 0; j < num_virtuals; ++j) {
+ auto method = method_array->GetElementPtrSize<ArtMethod*>(j, pointer_size);
+ DCHECK(method != nullptr) << PrettyClass(super_class);
+ // Miranda methods cannot be used to implement an interface method and defaults should be
+ // skipped in case we override it.
+ if (method->IsDefault() || method->IsMiranda()) {
+ continue;
+ }
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size);
+ uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ auto** imt_ref = &out_imt[imt_index];
+ if (*imt_ref == unimplemented_method) {
+ *imt_ref = method;
+ } else if (*imt_ref != imt_conflict_method) {
+ *imt_ref = imt_conflict_method;
+ }
+ }
+ }
+ }
+}
+
+// TODO This method needs to be split up into several smaller methods.
bool ClassLinker::LinkInterfaceMethods(
Thread* self,
Handle<mirror::Class> klass,
@@ -5461,7 +5517,19 @@ bool ClassLinker::LinkInterfaceMethods(
ArtMethod** out_imt) {
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
+
+ const bool is_interface = klass->IsInterface();
+ // TODO It might in the future prove useful to make interfaces have full iftables, allowing a
+ // faster invoke-super implementation in the interpreter/across dex-files.
+ // We will just skip doing any of this on non-debug builds for speed.
+ if (is_interface &&
+ !kIsDebugBuild &&
+ !runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
+ return true;
+ }
+
const bool has_superclass = klass->HasSuperClass();
+ const bool fill_tables = !is_interface;
const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
const size_t method_size = ArtMethod::Size(image_pointer_size_);
@@ -5469,10 +5537,6 @@ bool ClassLinker::LinkInterfaceMethods(
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
- // If we're an interface, we don't need the vtable pointers, so we're done.
- if (klass->IsInterface()) {
- return true;
- }
// These are allocated on the heap to begin, we then transfer to linear alloc when we re-create
// the virtual methods array.
// Need to use low 4GB arenas for compiler or else the pointers wont fit in 32 bit method array
@@ -5489,71 +5553,42 @@ bool ClassLinker::LinkInterfaceMethods(
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
ArtMethod* const imt_conflict_method = runtime->GetImtConflictMethod();
// Copy the IMT from the super class if possible.
- bool extend_super_iftable = false;
- if (has_superclass) {
- mirror::Class* super_class = klass->GetSuperClass();
- extend_super_iftable = true;
- if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- out_imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
- }
- } else {
- // No imt in the super class, need to reconstruct from the iftable.
- mirror::IfTable* if_table = super_class->GetIfTable();
- const size_t length = super_class->GetIfTableCount();
- for (size_t i = 0; i < length; ++i) {
- mirror::Class* interface = iftable->GetInterface(i);
- const size_t num_virtuals = interface->NumVirtualMethods();
- const size_t method_array_count = if_table->GetMethodArrayCount(i);
- DCHECK_EQ(num_virtuals, method_array_count);
- if (method_array_count == 0) {
- continue;
- }
- auto* method_array = if_table->GetMethodArray(i);
- for (size_t j = 0; j < num_virtuals; ++j) {
- auto method = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
- DCHECK(method != nullptr) << PrettyClass(super_class);
- // Miranda methods cannot be used to implement an interface method and defaults should be
- // skipped in case we override it.
- if (method->IsDefault() || method->IsMiranda()) {
- continue;
- }
- ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- auto** imt_ref = &out_imt[imt_index];
- if (*imt_ref == unimplemented_method) {
- *imt_ref = method;
- } else if (*imt_ref != imt_conflict_method) {
- *imt_ref = imt_conflict_method;
- }
- }
- }
- }
+ const bool extend_super_iftable = has_superclass;
+ if (has_superclass && fill_tables) {
+ FillImtFromSuperClass(klass,
+ iftable,
+ unimplemented_method,
+ imt_conflict_method,
+ out_imt,
+ image_pointer_size_);
}
+
// Allocate method arrays before since we don't want miss visiting miranda method roots due to
// thread suspension.
- for (size_t i = 0; i < ifcount; ++i) {
- size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
- if (num_methods > 0) {
- const bool is_super = i < super_ifcount;
- // This is an interface implemented by a super-class. Therefore we can just copy the method
- // array from the superclass.
- const bool super_interface = is_super && extend_super_iftable;
- mirror::PointerArray* method_array;
- if (super_interface) {
- mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable();
- DCHECK(if_table != nullptr);
- DCHECK(if_table->GetMethodArray(i) != nullptr);
- // If we are working on a super interface, try extending the existing method array.
- method_array = down_cast<mirror::PointerArray*>(if_table->GetMethodArray(i)->Clone(self));
- } else {
- method_array = AllocPointerArray(self, num_methods);
- }
- if (UNLIKELY(method_array == nullptr)) {
- self->AssertPendingOOMException();
- return false;
+ if (fill_tables) {
+ for (size_t i = 0; i < ifcount; ++i) {
+ size_t num_methods = iftable->GetInterface(i)->NumDeclaredVirtualMethods();
+ if (num_methods > 0) {
+ const bool is_super = i < super_ifcount;
+ // This is an interface implemented by a super-class. Therefore we can just copy the method
+ // array from the superclass.
+ const bool super_interface = is_super && extend_super_iftable;
+ mirror::PointerArray* method_array;
+ if (super_interface) {
+ mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable();
+ DCHECK(if_table != nullptr);
+ DCHECK(if_table->GetMethodArray(i) != nullptr);
+ // If we are working on a super interface, try extending the existing method array.
+ method_array = down_cast<mirror::PointerArray*>(if_table->GetMethodArray(i)->Clone(self));
+ } else {
+ method_array = AllocPointerArray(self, num_methods);
+ }
+ if (UNLIKELY(method_array == nullptr)) {
+ self->AssertPendingOOMException();
+ return false;
+ }
+ iftable->SetMethodArray(i, method_array);
}
- iftable->SetMethodArray(i, method_array);
}
}
@@ -5569,12 +5604,16 @@ bool ClassLinker::LinkInterfaceMethods(
DCHECK_GE(i, 0u);
DCHECK_LT(i, ifcount);
- size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
+ size_t num_methods = iftable->GetInterface(i)->NumDeclaredVirtualMethods();
if (num_methods > 0) {
StackHandleScope<2> hs2(self);
const bool is_super = i < super_ifcount;
const bool super_interface = is_super && extend_super_iftable;
- auto method_array(hs2.NewHandle(iftable->GetMethodArray(i)));
+ // We don't actually create or fill these tables for interfaces; we just copy some methods for
+ // conflict methods. Just set this as nullptr in those cases.
+ Handle<mirror::PointerArray> method_array(fill_tables
+ ? hs2.NewHandle(iftable->GetMethodArray(i))
+ : hs2.NewHandle<mirror::PointerArray>(nullptr));
ArraySlice<ArtMethod> input_virtual_methods;
ScopedNullHandle<mirror::PointerArray> null_handle;
@@ -5587,7 +5626,7 @@ bool ClassLinker::LinkInterfaceMethods(
// at the super-classes iftable methods (copied into method_array previously) when we are
// looking for the implementation of a super-interface method but that is rather dirty.
bool using_virtuals;
- if (super_interface) {
+ if (super_interface || is_interface) {
// If we are overwriting a super class interface, try to only virtual methods instead of the
// whole vtable.
using_virtuals = true;
@@ -5597,6 +5636,7 @@ bool ClassLinker::LinkInterfaceMethods(
// For a new interface, however, we need the whole vtable in case a new
// interface method is implemented in the whole superclass.
using_virtuals = false;
+ DCHECK(vtable.Get() != nullptr);
input_vtable_array = vtable;
input_array_length = input_vtable_array->GetLength();
}
@@ -5646,13 +5686,15 @@ bool ClassLinker::LinkInterfaceMethods(
break;
} else {
found_impl = true;
- method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
- // Place method in imt if entry is empty, place conflict otherwise.
- SetIMTRef(unimplemented_method,
- imt_conflict_method,
- image_pointer_size_,
- vtable_method,
- /*out*/imt_ptr);
+ if (LIKELY(fill_tables)) {
+ method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
+ // Place method in imt if entry is empty, place conflict otherwise.
+ SetIMTRef(unimplemented_method,
+ imt_conflict_method,
+ image_pointer_size_,
+ vtable_method,
+ /*out*/imt_ptr);
+ }
break;
}
}
@@ -5679,7 +5721,7 @@ bool ClassLinker::LinkInterfaceMethods(
method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
DCHECK(supers_method != nullptr);
DCHECK(interface_name_comparator.HasSameNameAndSignature(supers_method));
- if (!supers_method->IsOverridableByDefaultMethod()) {
+ if (LIKELY(!supers_method->IsOverridableByDefaultMethod())) {
// The method is not overridable by a default method (i.e. it is directly implemented
// in some class). Therefore move onto the next interface method.
continue;
@@ -5701,11 +5743,13 @@ bool ClassLinker::LinkInterfaceMethods(
} else {
// See if we already have a conflict method for this method.
ArtMethod* preexisting_conflict = FindSameNameAndSignature(interface_name_comparator,
- default_conflict_methods);
+ default_conflict_methods);
if (LIKELY(preexisting_conflict != nullptr)) {
// We already have another conflict we can reuse.
default_conflict_method = preexisting_conflict;
} else {
+ // Note that we do this even if we are an interface since we need to create this and
+ // cannot reuse another class's.
// Create a new conflict method for this to use.
default_conflict_method =
reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
@@ -5715,7 +5759,7 @@ bool ClassLinker::LinkInterfaceMethods(
}
current_method = default_conflict_method;
break;
- }
+ } // case kDefaultConflict
case DefaultMethodSearchResult::kDefaultFound: {
DCHECK(current_method != nullptr);
// Found a default method.
@@ -5724,8 +5768,12 @@ bool ClassLinker::LinkInterfaceMethods(
// We found a default method but it was the same one we already have from our
// superclass. Don't bother adding it to our vtable again.
current_method = vtable_impl;
- } else {
+ } else if (LIKELY(fill_tables)) {
+ // Interfaces don't need to copy default methods since they don't have vtables.
// Only record this default method if it is new to save space.
+ // TODO It might be worthwhile to copy default methods on interfaces anyway since it
+ // would make lookups for interface super-invokes much faster. (We would only need to scan
+ // the iftable to find if there is an NSME or AME.)
ArtMethod* old = FindSameNameAndSignature(interface_name_comparator, default_methods);
if (old == nullptr) {
// We found a default method implementation and there were no conflicts.
@@ -5736,7 +5784,7 @@ bool ClassLinker::LinkInterfaceMethods(
}
}
break;
- }
+ } // case kDefaultFound
case DefaultMethodSearchResult::kAbstractFound: {
DCHECK(current_method == nullptr);
// Abstract method masks all defaults.
@@ -5748,38 +5796,46 @@ bool ClassLinker::LinkInterfaceMethods(
current_method = vtable_impl;
}
break;
- }
+ } // case kAbstractFound
}
- if (current_method != nullptr) {
- // We found a default method implementation. Record it in the iftable and IMT.
- method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
- SetIMTRef(unimplemented_method,
- imt_conflict_method,
- image_pointer_size_,
- current_method,
- /*out*/imt_ptr);
- } else if (!super_interface) {
- // We could not find an implementation for this method and since it is a brand new
- // interface we searched the entire vtable (and all default methods) for an implementation
- // but couldn't find one. We therefore need to make a miranda method.
- //
- // Find out if there is already a miranda method we can use.
- ArtMethod* miranda_method = FindSameNameAndSignature(interface_name_comparator,
- miranda_methods);
- if (miranda_method == nullptr) {
- DCHECK(interface_method->IsAbstract()) << PrettyMethod(interface_method);
- miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
- CHECK(miranda_method != nullptr);
- // Point the interface table at a phantom slot.
- new(miranda_method) ArtMethod(interface_method, image_pointer_size_);
- miranda_methods.push_back(miranda_method);
+ if (LIKELY(fill_tables)) {
+ if (current_method != nullptr) {
+ // We found a default method implementation. Record it in the iftable and IMT.
+ method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
+ SetIMTRef(unimplemented_method,
+ imt_conflict_method,
+ image_pointer_size_,
+ current_method,
+ /*out*/imt_ptr);
+ } else if (!super_interface) {
+ // We could not find an implementation for this method and since it is a brand new
+ // interface we searched the entire vtable (and all default methods) for an
+ // implementation but couldn't find one. We therefore need to make a miranda method.
+ //
+ // Find out if there is already a miranda method we can use.
+ ArtMethod* miranda_method = FindSameNameAndSignature(interface_name_comparator,
+ miranda_methods);
+ if (miranda_method == nullptr) {
+ DCHECK(interface_method->IsAbstract()) << PrettyMethod(interface_method);
+ miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(method_size));
+ CHECK(miranda_method != nullptr);
+ // Point the interface table at a phantom slot.
+ new(miranda_method) ArtMethod(interface_method, image_pointer_size_);
+ miranda_methods.push_back(miranda_method);
+ }
+ method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_);
}
- method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_);
}
- }
- }
- }
- if (!miranda_methods.empty() || !default_methods.empty() || !default_conflict_methods.empty()) {
+ } // For each method in interface end.
+ } // if (num_methods > 0)
+ } // For each interface.
+ const bool has_new_virtuals = !(miranda_methods.empty() &&
+ default_methods.empty() &&
+ default_conflict_methods.empty());
+ // TODO don't extend virtuals of interface unless necessary (when is it?).
+ if (has_new_virtuals) {
+ DCHECK(!is_interface || (default_methods.empty() && miranda_methods.empty()))
+ << "Interfaces should only have default-conflict methods appended to them.";
VLOG(class_linker) << PrettyClass(klass.Get()) << ": miranda_methods=" << miranda_methods.size()
<< " default_methods=" << default_methods.size()
<< " default_conflict_methods=" << default_conflict_methods.size();
@@ -5876,101 +5932,102 @@ bool ClassLinker::LinkInterfaceMethods(
// suspension assert.
self->EndAssertNoThreadSuspension(old_cause);
- const size_t old_vtable_count = vtable->GetLength();
- const size_t new_vtable_count = old_vtable_count +
- miranda_methods.size() +
- default_methods.size() +
- default_conflict_methods.size();
- miranda_methods.clear();
- vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
- if (UNLIKELY(vtable.Get() == nullptr)) {
- self->AssertPendingOOMException();
- return false;
- }
- out = methods->begin(method_size, method_alignment) + old_method_count;
- size_t vtable_pos = old_vtable_count;
- for (size_t i = old_method_count; i < new_method_count; ++i) {
- // Leave the declaring class alone as type indices are relative to it
- out->SetMethodIndex(0xFFFF & vtable_pos);
- vtable->SetElementPtrSize(vtable_pos, &*out, image_pointer_size_);
- ++out;
- ++vtable_pos;
- }
- CHECK_EQ(vtable_pos, new_vtable_count);
- // Update old vtable methods. We use the default_translations map to figure out what each vtable
- // entry should be updated to, if they need to be at all.
- for (size_t i = 0; i < old_vtable_count; ++i) {
- ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
- // Try and find what we need to change this method to.
- auto translation_it = default_translations.find(i);
- bool found_translation = false;
- if (translation_it != default_translations.end()) {
- if (translation_it->second.IsInConflict()) {
- // Find which conflict method we are to use for this method.
- MethodNameAndSignatureComparator old_method_comparator(
- translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- ArtMethod* new_conflict_method = FindSameNameAndSignature(old_method_comparator,
- default_conflict_methods);
- CHECK(new_conflict_method != nullptr) << "Expected a conflict method!";
- translated_method = new_conflict_method;
- } else if (translation_it->second.IsAbstract()) {
- // Find which miranda method we are to use for this method.
- MethodNameAndSignatureComparator old_method_comparator(
- translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- ArtMethod* miranda_method = FindSameNameAndSignature(old_method_comparator,
- miranda_methods);
- DCHECK(miranda_method != nullptr);
- translated_method = miranda_method;
+ if (fill_tables) {
+ // Update the vtable to point at the new method structures. We can skip this for interfaces
+ // since they do not have vtables.
+ const size_t old_vtable_count = vtable->GetLength();
+ const size_t new_vtable_count = old_vtable_count +
+ miranda_methods.size() +
+ default_methods.size() +
+ default_conflict_methods.size();
+ vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
+ if (UNLIKELY(vtable.Get() == nullptr)) {
+ self->AssertPendingOOMException();
+ return false;
+ }
+ out = methods->begin(method_size, method_alignment) + old_method_count;
+ size_t vtable_pos = old_vtable_count;
+ // Update all the newly copied methods' indexes so they denote their placement in the vtable.
+ for (size_t i = old_method_count; i < new_method_count; ++i) {
+ // Leave the declaring class alone: the method's dex_code_item_offset_ and dex_method_index_
+ // fields are references into the dex file the method was defined in. Since the ArtMethod
+ // does not store that information, it uses declaring_class_->dex_cache_.
+ out->SetMethodIndex(0xFFFF & vtable_pos);
+ vtable->SetElementPtrSize(vtable_pos, &*out, image_pointer_size_);
+ ++out;
+ ++vtable_pos;
+ }
+ CHECK_EQ(vtable_pos, new_vtable_count);
+ // Update old vtable methods. We use the default_translations map to figure out what each
+ // vtable entry should be updated to, if they need to be at all.
+ for (size_t i = 0; i < old_vtable_count; ++i) {
+ ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(
+ i, image_pointer_size_);
+ // Try and find what we need to change this method to.
+ auto translation_it = default_translations.find(i);
+ bool found_translation = false;
+ if (translation_it != default_translations.end()) {
+ if (translation_it->second.IsInConflict()) {
+ // Find which conflict method we are to use for this method.
+ MethodNameAndSignatureComparator old_method_comparator(
+ translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+ ArtMethod* new_conflict_method = FindSameNameAndSignature(old_method_comparator,
+ default_conflict_methods);
+ CHECK(new_conflict_method != nullptr) << "Expected a conflict method!";
+ translated_method = new_conflict_method;
+ } else if (translation_it->second.IsAbstract()) {
+ // Find which miranda method we are to use for this method.
+ MethodNameAndSignatureComparator old_method_comparator(
+ translated_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+ ArtMethod* miranda_method = FindSameNameAndSignature(old_method_comparator,
+ miranda_methods);
+ DCHECK(miranda_method != nullptr);
+ translated_method = miranda_method;
+ } else {
+ // Normal default method (changed from an older default or abstract interface method).
+ DCHECK(translation_it->second.IsTranslation());
+ translated_method = translation_it->second.GetTranslation();
+ }
+ found_translation = true;
+ }
+ DCHECK(translated_method != nullptr);
+ auto it = move_table.find(translated_method);
+ if (it != move_table.end()) {
+ auto* new_method = it->second;
+ DCHECK(new_method != nullptr);
+ vtable->SetElementPtrSize(i, new_method, image_pointer_size_);
} else {
- // Normal default method (changed from an older default or abstract interface method).
- DCHECK(translation_it->second.IsTranslation());
- translated_method = translation_it->second.GetTranslation();
+ // If it was not going to be updated we wouldn't have put it into the default_translations
+ // map.
+ CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
}
- found_translation = true;
- }
- DCHECK(translated_method != nullptr);
- auto it = move_table.find(translated_method);
- if (it != move_table.end()) {
- auto* new_method = it->second;
- DCHECK(new_method != nullptr);
- vtable->SetElementPtrSize(i, new_method, image_pointer_size_);
- } else {
- // If it was not going to be updated we wouldn't have put it into the default_translations
- // map.
- CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
}
- }
-
- if (kIsDebugBuild) {
- for (size_t i = 0; i < new_vtable_count; ++i) {
- CHECK(move_table.find(vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_)) ==
- move_table.end());
+ klass->SetVTable(vtable.Get());
+
+ // Go fix up all the stale iftable pointers.
+ for (size_t i = 0; i < ifcount; ++i) {
+ for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
+ auto* method_array = iftable->GetMethodArray(i);
+ auto* m = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ DCHECK(m != nullptr) << PrettyClass(klass.Get());
+ auto it = move_table.find(m);
+ if (it != move_table.end()) {
+ auto* new_m = it->second;
+ DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
+ method_array->SetElementPtrSize(j, new_m, image_pointer_size_);
+ }
+ }
}
- }
- klass->SetVTable(vtable.Get());
- // Go fix up all the stale (old miranda or default method) pointers.
- // First do it on the iftable.
- for (size_t i = 0; i < ifcount; ++i) {
- for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
- auto* method_array = iftable->GetMethodArray(i);
- auto* m = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
- DCHECK(m != nullptr) << PrettyClass(klass.Get());
- auto it = move_table.find(m);
+ // Fix up IMT next
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ auto it = move_table.find(out_imt[i]);
if (it != move_table.end()) {
- auto* new_m = it->second;
- DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
- method_array->SetElementPtrSize(j, new_m, image_pointer_size_);
+ out_imt[i] = it->second;
}
}
}
- // Fix up IMT next
- for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- auto it = move_table.find(out_imt[i]);
- if (it != move_table.end()) {
- out_imt[i] = it->second;
- }
- }
+
  // Check that there are no stale methods in the dex cache array.
if (kIsDebugBuild) {
auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
@@ -5994,7 +6051,7 @@ bool ClassLinker::LinkInterfaceMethods(
} else {
self->EndAssertNoThreadSuspension(old_cause);
}
- if (kIsDebugBuild) {
+ if (kIsDebugBuild && !is_interface) {
SanityCheckVTable(klass, image_pointer_size_);
}
return true;
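
Before the resolution hunks below, a compact model of the vtable-translation pass above may help. Everything here (Method, Kind, Translation, ApplyTranslations, the two stub pointers) is a hypothetical simplification, not an ART type; in particular, the real code finds the matching conflict or miranda method by name and signature, which is collapsed into single stand-in pointers in this sketch.

#include <cstddef>
#include <unordered_map>
#include <vector>

struct Method {};

enum class Kind { kConflict, kAbstract, kTranslation };
struct Translation {
  Kind kind;
  Method* target;  // Only meaningful for kTranslation.
};

// Model of the "update old vtable methods" pass: translated slots are
// rewritten first, then every slot is remapped through move_table since
// the methods were copied into a newly allocated methods array.
void ApplyTranslations(std::vector<Method*>& vtable,
                       const std::unordered_map<size_t, Translation>& translations,
                       const std::unordered_map<Method*, Method*>& move_table,
                       Method* conflict_stub,
                       Method* miranda_stub) {
  for (size_t i = 0; i < vtable.size(); ++i) {
    auto t = translations.find(i);
    if (t != translations.end()) {
      switch (t->second.kind) {
        case Kind::kConflict:    vtable[i] = conflict_stub;     break;
        case Kind::kAbstract:    vtable[i] = miranda_stub;      break;
        case Kind::kTranslation: vtable[i] = t->second.target;  break;
      }
    }
    auto moved = move_table.find(vtable[i]);
    if (moved != move_table.end()) {
      vtable[i] = moved->second;
    }
  }
}
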
@@ -6384,7 +6441,13 @@ ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
}
break;
- case kSuper: // Fall-through.
+ case kSuper:
+ if (klass->IsInterface()) {
+ resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+ } else {
+ resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+ }
+ break;
case kVirtual:
resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
break;
@@ -6406,7 +6469,13 @@ ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
- case kSuper: // Fall-through.
+ case kSuper:
+ if (klass->IsInterface()) {
+ resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
+ } else {
+ resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
+ }
+ break;
case kVirtual:
resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
break;
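
A minimal model of the new kSuper resolution branch above, using simplified stand-in types; Class, Method, and the two Find helpers are hypothetical placeholders, while the real code works on mirror::Class and ArtMethod with a dex cache or a name/signature pair.

#include <cstdint>

struct Method {};

struct Class {
  bool is_interface = false;
  bool IsInterface() const { return is_interface; }
  // Toy stand-ins for the real lookups.
  Method* FindInterfaceMethod(uint32_t /*method_idx*/) { return nullptr; }
  Method* FindVirtualMethod(uint32_t /*method_idx*/) { return nullptr; }
};

// Mirrors the kSuper case: an invoke-super whose method reference names an
// interface searches the interface hierarchy; otherwise the virtual
// (vtable) methods are searched as before.
Method* ResolveSuperMethod(Class* klass, uint32_t method_idx) {
  return klass->IsInterface()
      ? klass->FindInterfaceMethod(method_idx)
      : klass->FindVirtualMethod(method_idx);
}
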
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 99353c5657..471d7ca5e6 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -24,6 +24,7 @@
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
+#include "experimental_flags.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
#include "mirror/abstract_method.h"
@@ -228,7 +229,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
if (klass->IsInterface()) {
EXPECT_EQ(0U, iftable->GetMethodArrayCount(i));
} else {
- EXPECT_EQ(interface->NumVirtualMethods(), iftable->GetMethodArrayCount(i));
+ EXPECT_EQ(interface->NumDeclaredVirtualMethods(), iftable->GetMethodArrayCount(i));
}
}
if (klass->IsAbstract()) {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index c911365698..911f3c22db 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -106,8 +106,7 @@ class ClassTable {
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
- REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
size_t ReadFromMemory(uint8_t* ptr)
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 624abb9f6d..a4e16ae71f 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -429,7 +429,7 @@ void CommonRuntimeTest::TearDown() {
CHECK_EQ(0, dlclose(handle));
}
{
- void* handle = dlopen("libopenjdk.so", RTLD_LAZY);
+ void* handle = dlopen("libopenjdkd.so", RTLD_LAZY);
dlclose(handle);
CHECK_EQ(0, dlclose(handle));
}
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 40e2b1593e..b4208fe054 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -86,6 +86,14 @@ void ThrowAbstractMethodError(ArtMethod* method) {
PrettyMethod(method).c_str()).c_str());
}
+void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) {
+ ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr,
+ StringPrintf("abstract method \"%s\"",
+ PrettyMethod(method_idx,
+ dex_file,
+ /* with_signature */ true).c_str()).c_str());
+}
+
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
@@ -211,6 +219,22 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
msg.str().c_str());
}
+void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
+ mirror::Class* target_class,
+ mirror::Object* this_object,
+ ArtMethod* referrer) {
+ // Referrer is calling interface_method on this_object; however, the interface_method isn't
+ // implemented by this_object.
+ CHECK(this_object != nullptr);
+ std::ostringstream msg;
+ msg << "Class '" << PrettyDescriptor(this_object->GetClass())
+ << "' does not implement interface '" << PrettyDescriptor(target_class) << "' in call to '"
+ << PrettyMethod(method) << "'";
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
+ referrer != nullptr ? referrer->GetDeclaringClass() : nullptr,
+ msg.str().c_str());
+}
+
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
ArtMethod* referrer) {
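
To make the new error text concrete, here is a small self-contained program that reproduces the message format built above; InterfaceSuperIcceMessage is a hypothetical helper, and plain strings stand in for the Pretty* formatting of classes and methods.

#include <iostream>
#include <sstream>
#include <string>

// Builds the same style of message as
// ThrowIncompatibleClassChangeErrorClassForInterfaceSuper.
std::string InterfaceSuperIcceMessage(const std::string& receiver_class,
                                      const std::string& target_interface,
                                      const std::string& method) {
  std::ostringstream msg;
  msg << "Class '" << receiver_class
      << "' does not implement interface '" << target_interface
      << "' in call to '" << method << "'";
  return msg.str();
}

int main() {
  std::cout << InterfaceSuperIcceMessage("LFoo;", "LIface;", "void Iface.m()")
            << "\n";
}
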
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 85fe2b3997..39c4e52b15 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -27,6 +27,7 @@ namespace mirror {
} // namespace mirror
class ArtField;
class ArtMethod;
+class DexFile;
class Signature;
class StringPiece;
@@ -35,6 +36,9 @@ class StringPiece;
void ThrowAbstractMethodError(ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file)
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
@@ -107,6 +111,12 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
ArtMethod* method, ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
+ mirror::Class* target_class,
+ mirror::Object* this_object,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
ArtMethod* referrer)
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 57d623e987..52da28bb50 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -27,7 +27,6 @@
#include "base/unix_file/fd_file.h"
#include "elf_file_impl.h"
#include "elf_utils.h"
-#include "jit/debugger_interface.h"
#include "leb128.h"
#include "utils.h"
@@ -53,8 +52,6 @@ ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
hash_section_start_(nullptr),
symtab_symbol_table_(nullptr),
dynsym_symbol_table_(nullptr),
- jit_elf_image_(nullptr),
- jit_gdb_entry_(nullptr),
requested_base_(requested_base) {
CHECK(file != nullptr);
}
@@ -273,10 +270,6 @@ ElfFileImpl<ElfTypes>::~ElfFileImpl() {
STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
- delete jit_elf_image_;
- if (jit_gdb_entry_) {
- DeleteJITCodeEntry(jit_gdb_entry_);
- }
}
template <typename ElfTypes>
@@ -1300,11 +1293,6 @@ bool ElfFileImpl<ElfTypes>::Load(bool executable, std::string* error_msg) {
return false;
}
- // Use GDB JIT support to do stack backtrace, etc.
- if (executable) {
- GdbJITSupport();
- }
-
return true;
}
@@ -1395,50 +1383,6 @@ void ElfFileImpl<ElfTypes>::ApplyOatPatches(
}
template <typename ElfTypes>
-void ElfFileImpl<ElfTypes>::GdbJITSupport() {
- // We only get here if we only are mapping the program header.
- DCHECK(program_header_only_);
-
- // Well, we need the whole file to do this.
- std::string error_msg;
- // Make it MAP_PRIVATE so we can just give it to gdb if all the necessary
- // sections are there.
- std::unique_ptr<ElfFileImpl<ElfTypes>> all_ptr(
- Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
- if (all_ptr.get() == nullptr) {
- return;
- }
- ElfFileImpl<ElfTypes>& all = *all_ptr;
-
- // We need the eh_frame for gdb but debug info might be present without it.
- const Elf_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
- if (eh_frame == nullptr) {
- return;
- }
-
- // Do we have interesting sections?
- // We need to add in a strtab and symtab to the image.
- // all is MAP_PRIVATE so it can be written to freely.
- // We also already have strtab and symtab so we are fine there.
- Elf_Ehdr& elf_hdr = all.GetHeader();
- elf_hdr.e_entry = 0;
- elf_hdr.e_phoff = 0;
- elf_hdr.e_phnum = 0;
- elf_hdr.e_phentsize = 0;
- elf_hdr.e_type = ET_EXEC;
-
- // Since base_address_ is 0 if we are actually loaded at a known address (i.e. this is boot.oat)
- // and the actual address stuff starts at in regular files this is good.
- if (!all.FixupDebugSections(reinterpret_cast<intptr_t>(base_address_))) {
- LOG(ERROR) << "Failed to load GDB data";
- return;
- }
-
- jit_gdb_entry_ = CreateJITCodeEntry(all.Begin(), all.Size());
- gdb_file_mapping_.reset(all_ptr.release());
-}
-
-template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 0f466bd7b8..2af31dc554 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -213,12 +213,6 @@ class ElfFileImpl {
SymbolTable* symtab_symbol_table_;
SymbolTable* dynsym_symbol_table_;
- // Support for GDB JIT
- uint8_t* jit_elf_image_;
- JITCodeEntry* jit_gdb_entry_;
- std::unique_ptr<ElfFileImpl<ElfTypes>> gdb_file_mapping_;
- void GdbJITSupport();
-
// Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
uint8_t* requested_base_;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ba2fb9493f..9a9f42b976 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -416,10 +416,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
return nullptr; // Failure.
} else if (access_check) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- mirror::Class* referring_class = referrer->GetDeclaringClass();
bool can_access_resolved_method =
- referring_class->CheckResolvedMethodAccess<type>(methods_class, resolved_method,
- method_idx);
+ referrer->GetDeclaringClass()->CheckResolvedMethodAccess<type>(methods_class,
+ resolved_method,
+ method_idx);
if (UNLIKELY(!can_access_resolved_method)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
@@ -450,23 +450,56 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_
return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
}
case kSuper: {
- mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
- uint16_t vtable_index = resolved_method->GetMethodIndex();
- if (access_check) {
- // Check existence of super class.
- if (super_class == nullptr || !super_class->HasVTable() ||
- vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
- // Behavior to agree with that of the verifier.
+ // TODO This lookup is quite slow.
+ // NB This is actually quite tricky to do any other way. We cannot use GetDeclaringClass since
+ // that will actually not be what we want in some cases where there are miranda methods or
+ // defaults. What we actually need is a GetContainingClass that says which class's virtuals
+ // this method is coming from.
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
+ uint16_t method_type_idx = referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ mirror::Class* method_reference_class = class_linker->ResolveType(method_type_idx, referrer);
+ if (UNLIKELY(method_reference_class == nullptr)) {
+ // Bad type idx.
+ CHECK(self->IsExceptionPending());
+ return nullptr;
+ } else if (!method_reference_class->IsInterface()) {
+ // It is not an interface.
+ mirror::Class* super_class = referring_class->GetSuperClass();
+ uint16_t vtable_index = resolved_method->GetMethodIndex();
+ if (access_check) {
+ // Check existence of super class.
+ if (super_class == nullptr || !super_class->HasVTable() ||
+ vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
+ // Behavior to agree with that of the verifier.
+ ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
+ resolved_method->GetName(), resolved_method->GetSignature());
+ return nullptr; // Failure.
+ }
+ }
+ DCHECK(super_class != nullptr);
+ DCHECK(super_class->HasVTable());
+ return super_class->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
+ } else {
+ // It is an interface.
+ if (access_check) {
+ if (!method_reference_class->IsAssignableFrom((*this_object)->GetClass())) {
+ ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(resolved_method,
+ method_reference_class,
+ *this_object,
+ referrer);
+ return nullptr; // Failure.
+ }
+ }
+ // TODO We can do better than this for a (compiled) fastpath.
+ ArtMethod* result = method_reference_class->FindVirtualMethodForInterfaceSuper(
+ resolved_method, class_linker->GetImagePointerSize());
+ // Throw an NSME if the lookup returned nullptr.
+ if (result == nullptr) {
ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
resolved_method->GetName(), resolved_method->GetSignature());
- return nullptr; // Failure.
}
- } else {
- // Super class must exist.
- DCHECK(super_class != nullptr);
+ return result;
}
- DCHECK(super_class->HasVTable());
- return super_class->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
@@ -576,8 +609,9 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_objec
if (UNLIKELY(this_object == nullptr && type != kStatic)) {
return nullptr;
}
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
ArtMethod* resolved_method =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*));
+ referring_class->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*));
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -588,7 +622,6 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_objec
return nullptr;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- mirror::Class* referring_class = referrer->GetDeclaringClass();
if (UNLIKELY(!referring_class->CanAccess(methods_class) ||
!referring_class->CanAccessMember(methods_class,
resolved_method->GetAccessFlags()))) {
@@ -601,12 +634,25 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_objec
} else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
- mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
- if (resolved_method->GetMethodIndex() >= super_class->GetVTableLength()) {
- // The super class does not have the method.
+ // TODO This lookup is rather slow.
+ uint16_t method_type_idx = referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ mirror::Class* method_reference_class =
+ referring_class->GetDexCache()->GetResolvedType(method_type_idx);
+ if (method_reference_class == nullptr) {
+ // Need to do full type resolution...
return nullptr;
+ } else if (!method_reference_class->IsInterface()) {
+ // It is not an interface.
+ mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass();
+ if (resolved_method->GetMethodIndex() >= super_class->GetVTableLength()) {
+ // The super class does not have the method.
+ return nullptr;
+ }
+ return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), sizeof(void*));
+ } else {
+ return method_reference_class->FindVirtualMethodForInterfaceSuper(
+ resolved_method, sizeof(void*));
}
- return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), sizeof(void*));
} else {
DCHECK(type == kVirtual);
return this_object->GetClass()->GetVTableEntry(
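
A compilable distillation of the FindMethodFast kSuper path above, with toy containers in place of the dex cache and vtable structures; all names here are hypothetical, and nullptr means "bail to the slow path", exactly as in the code.

#include <cstddef>
#include <vector>

struct Method {};

struct Class {
  bool is_interface = false;
  Class* super = nullptr;
  std::vector<Method*> vtable;
  // Toy stand-in for the iftable scan done by the real helper.
  Method* FindVirtualMethodForInterfaceSuper(Method* /*m*/) { return nullptr; }
};

Method* FastSuperLookup(Class* referring, Class* method_ref_class,
                        size_t vtable_index, Method* resolved) {
  if (method_ref_class == nullptr) {
    return nullptr;  // Declaring type not resolved in the dex cache yet.
  }
  if (!method_ref_class->is_interface) {
    Class* super = referring->super;
    if (super == nullptr || vtable_index >= super->vtable.size()) {
      return nullptr;  // The super class does not have the method.
    }
    return super->vtable[vtable_index];
  }
  return method_ref_class->FindVirtualMethodForInterfaceSuper(resolved);
}
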
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index dc9f14c5f2..f87d48de54 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -122,7 +122,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
sizeof(void*) * kNumRosAllocThreadLocalSizeBrackets);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7f67ae4f08..d6c1817f13 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -571,15 +571,19 @@ Heap::Heap(size_t initial_size,
// Check that there's no gap between the image space and the non moving space so that the
// immune region won't break (eg. due to a large object allocated in the gap). This is only
// required when we're the zygote or using GSS.
- /* TODO: Modify this check to support multi-images. b/26317072
- bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(),
- non_moving_space_->GetMemMap());
+ // Space with smallest Begin().
+ space::ImageSpace* first_space = nullptr;
+ for (space::ImageSpace* space : boot_image_spaces_) {
+ if (first_space == nullptr || space->Begin() < first_space->Begin()) {
+ first_space = space;
+ }
+ }
+ bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
MemMap::DumpMaps(LOG(ERROR), true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
- */
}
instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
if (gc_stress_mode_) {
@@ -2333,7 +2337,7 @@ void Heap::PreZygoteFork() {
if (HasZygoteSpace()) {
return;
}
- Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
+ Runtime::Current()->GetInternTable()->AddNewTable();
Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
VLOG(heap) << "Starting PreZygoteFork";
// Trim the pages at the end of the non moving space.
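
The loop above selects the lowest-mapped boot image space by hand; an equivalent formulation with std::min_element is sketched below, with ImageSpace reduced to a bare begin pointer for illustration.

#include <algorithm>
#include <cstdint>
#include <vector>

// Reduced stand-in for gc::space::ImageSpace.
struct ImageSpace {
  const uint8_t* begin;
  const uint8_t* Begin() const { return begin; }
};

// Pick the boot image space with the smallest Begin() so the no-gap check
// runs against the lowest mapping.
ImageSpace* FirstSpace(const std::vector<ImageSpace*>& spaces) {
  auto it = std::min_element(
      spaces.begin(), spaces.end(),
      [](const ImageSpace* a, const ImageSpace* b) {
        return a->Begin() < b->Begin();
      });
  return it == spaces.end() ? nullptr : *it;
}
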
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index dfdbd04427..5f6bb8ee4b 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -47,13 +47,15 @@ ImageSpace::ImageSpace(const std::string& image_filename,
const char* image_location,
MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
- uint8_t* end,
- MemMap* shadow_map)
- : MemMapSpace(image_filename, mem_map, mem_map->Begin(), end, end,
+ uint8_t* end)
+ : MemMapSpace(image_filename,
+ mem_map,
+ mem_map->Begin(),
+ end,
+ end,
kGcRetentionPolicyNeverCollect),
oat_file_non_owned_(nullptr),
- image_location_(image_location),
- shadow_map_(shadow_map) {
+ image_location_(image_location) {
DCHECK(live_bitmap != nullptr);
live_bitmap_.reset(live_bitmap);
}
@@ -800,54 +802,19 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
+ // Bitmap only needs to cover until the end of the mirror objects section.
+ const ImageSection& image_objects = image_header.GetImageSection(ImageHeader::kSectionObjects);
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
image_bitmap_map.release(),
reinterpret_cast<uint8_t*>(map->Begin()),
- accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_section.Size())));
+ image_objects.End()));
if (bitmap == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
}
- // In case of multi-images, the images are spaced apart so that the bitmaps don't overlap. We
- // need to reserve the slack, as otherwise the large object space might allocate in there.
- // TODO: Reconsider the multi-image layout. b/26317072
- std::unique_ptr<MemMap> shadow_map;
- {
- uintptr_t image_begin = reinterpret_cast<uintptr_t>(image_header.GetImageBegin());
- uintptr_t image_end = RoundUp(image_begin + image_header.GetImageSize(), kPageSize);
- uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_header.GetOatFileBegin());
- if (image_end < oat_begin) {
- // There's a gap. Could be multi-image, could be the oat file spaced apart. Go ahead and
- // dummy-reserve the space covered by the bitmap (which will be a shadow that introduces
- // a gap to the next image).
- uintptr_t heap_size = bitmap->HeapSize();
- uintptr_t bitmap_coverage_end = RoundUp(image_begin + heap_size, kPageSize);
- if (bitmap_coverage_end > image_end) {
- VLOG(startup) << "Reserving bitmap shadow ["
- << std::hex << image_end << ";"
- << std::hex << bitmap_coverage_end << ";] (oat file begins at "
- << std::hex << oat_begin;
- // Note: we cannot use MemMap::Dummy here, as that won't reserve the space in 32-bit mode.
- shadow_map.reset(MemMap::MapAnonymous("Image bitmap shadow",
- reinterpret_cast<uint8_t*>(image_end),
- bitmap_coverage_end - image_end,
- PROT_NONE,
- false,
- false,
- error_msg));
- if (shadow_map == nullptr) {
- return nullptr;
- }
- // madvise it away, we don't really want it, just reserve the address space.
- // TODO: Should we use MadviseDontNeedAndZero? b/26317072
- madvise(shadow_map->BaseBegin(), shadow_map->BaseSize(), MADV_DONTNEED);
- }
- }
- }
-
// We only want the mirror object, not the ArtFields and ArtMethods.
uint8_t* const image_end =
map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End();
@@ -855,8 +822,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
image_location,
map.release(),
bitmap.release(),
- image_end,
- shadow_map.release()));
+ image_end));
// VerifyImageAllocations() will be called later in Runtime::Init()
// as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index b8ae4a033a..9c8e8b2014 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -171,8 +171,7 @@ class ImageSpace : public MemMapSpace {
const char* image_location,
MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
- uint8_t* end,
- MemMap* shadow_map = nullptr);
+ uint8_t* end);
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
@@ -185,10 +184,6 @@ class ImageSpace : public MemMapSpace {
const std::string image_location_;
- // A MemMap reserving the space of the bitmap "shadow," so that we don't allocate into it. Only
- // used in the multi-image case.
- std::unique_ptr<MemMap> shadow_map_;
-
private:
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
diff --git a/runtime/image.cc b/runtime/image.cc
index 3856787e2f..3cb66428fa 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '4', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '5', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 726cf1b461..5e0a11d2d1 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -417,6 +417,13 @@ class Instrumentation {
!Locks::classlinker_classes_lock_);
void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) {
+ /*
+ * TUNING: Dalvik's mterp stashes the actual current handler table base in a
+ * tls field. For Arm, this enables all suspend, debug & tracing checks to be
+ * collapsed into a single conditionally-executed ldr instruction.
+ * Move to Dalvik-style handler-table management for both the goto interpreter and
+ * mterp.
+ */
interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
}
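
The TUNING note added above suggests caching the active handler table base per-thread; the following sketch of that idea is entirely hypothetical (it is not the ART or Dalvik implementation) and only shows why flipping one TLS pointer can replace per-instruction flag tests.

#include <cstdio>

using Handler = void (*)();

static void FastOp()    { std::puts("fast"); }
static void CheckedOp() { std::puts("checked"); }

// Two dispatch tables: fast entries, and entries that first run
// suspend/debug/trace checks.
static Handler kMainTable[]        = { FastOp };
static Handler kAlternativeTable[] = { CheckedOp };

// Per-thread cached base: switching one pointer changes every opcode's
// behavior without testing instrumentation flags on each instruction.
thread_local Handler* tls_handler_table = kMainTable;

void SetInstrumentationActive(bool active) {
  tls_handler_table = active ? kAlternativeTable : kMainTable;
}

int main() {
  tls_handler_table[0]();          // "fast"
  SetInstrumentationActive(true);
  tls_handler_table[0]();          // "checked"
}
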
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index d035f5d960..015bf98e38 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -32,7 +32,8 @@
namespace art {
InternTable::InternTable()
- : image_added_to_intern_table_(false), log_new_roots_(false),
+ : images_added_to_intern_table_(false),
+ log_new_roots_(false),
weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
weak_root_state_(gc::kWeakRootStateNormal) {
}
@@ -93,10 +94,10 @@ mirror::String* InternTable::LookupWeak(mirror::String* s) {
return weak_interns_.Find(s);
}
-void InternTable::SwapPostZygoteWithPreZygote() {
+void InternTable::AddNewTable() {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- weak_interns_.SwapPostZygoteWithPreZygote();
- strong_interns_.SwapPostZygoteWithPreZygote();
+ weak_interns_.AddNewTable();
+ strong_interns_.AddNewTable();
}
mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -150,15 +151,14 @@ void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
RemoveWeak(s);
}
-void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
- CHECK(image_space != nullptr);
+void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- if (!image_added_to_intern_table_) {
+ for (gc::space::ImageSpace* image_space : image_spaces) {
const ImageHeader* const header = &image_space->GetImageHeader();
// Check if we have the interned strings section.
const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
if (section.Size() > 0) {
- ReadFromMemoryLocked(image_space->Begin() + section.Offset());
+ AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
} else {
// TODO: Delete this logic?
mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
@@ -179,15 +179,13 @@ void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
}
}
}
- image_added_to_intern_table_ = true;
}
+ images_added_to_intern_table_ = true;
}
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
- if (image_added_to_intern_table_) {
- return nullptr;
- }
- std::vector<gc::space::ImageSpace*> image_spaces =
+ DCHECK(!images_added_to_intern_table_);
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
if (image_spaces.empty()) {
return nullptr; // No image present.
@@ -284,9 +282,11 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool hold
return weak;
}
// Check the image for a match.
- mirror::String* image = LookupStringFromImage(s);
- if (image != nullptr) {
- return is_strong ? InsertStrong(image) : InsertWeak(image);
+ if (!images_added_to_intern_table_) {
+ mirror::String* const image_string = LookupStringFromImage(s);
+ if (image_string != nullptr) {
+ return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
+ }
}
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
@@ -326,27 +326,18 @@ void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
weak_interns_.SweepWeaks(visitor);
}
-void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
- const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
- ImageHeader::kSectionInternedStrings);
- // Read the string tables from the image.
- const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
- const size_t offset = ReadFromMemory(ptr);
- CHECK_LE(offset, intern_section.Size());
-}
-
-size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
+size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return ReadFromMemoryLocked(ptr);
+ return AddTableFromMemoryLocked(ptr);
}
-size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
- return strong_interns_.ReadIntoPreZygoteTable(ptr);
+size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
+ return strong_interns_.AddTableFromMemory(ptr);
}
size_t InternTable::WriteToMemory(uint8_t* ptr) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.WriteFromPostZygoteTable(ptr);
+ return strong_interns_.WriteToMemory(ptr);
}
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
@@ -364,71 +355,87 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
return a.Read()->Equals(b.Read());
}
-size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
- CHECK_EQ(pre_zygote_table_.Size(), 0u);
+size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
- pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
+ UnorderedSet set(ptr, /*make copy*/false, &read_count);
+ // TODO: Disable this for app images if app images have intern tables.
+ static constexpr bool kCheckDuplicates = true;
+ if (kCheckDuplicates) {
+ for (GcRoot<mirror::String>& string : set) {
+ CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
+ }
+ }
+ // Insert at the front since new strings are only ever inserted into the back table.
+ tables_.insert(tables_.begin(), std::move(set));
return read_count;
}
-size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
- return post_zygote_table_.WriteToMemory(ptr);
+size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
+ if (tables_.empty()) {
+ return 0;
+ }
+ UnorderedSet* table_to_write;
+ UnorderedSet combined;
+ if (tables_.size() > 1) {
+ table_to_write = &combined;
+ for (UnorderedSet& table : tables_) {
+ for (GcRoot<mirror::String>& string : table) {
+ combined.Insert(string);
+ }
+ }
+ } else {
+ table_to_write = &tables_.back();
+ }
+ return table_to_write->WriteToMemory(ptr);
}
void InternTable::Table::Remove(mirror::String* s) {
- auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != post_zygote_table_.end()) {
- post_zygote_table_.Erase(it);
- } else {
- it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
- DCHECK(it != pre_zygote_table_.end());
- pre_zygote_table_.Erase(it);
+ for (UnorderedSet& table : tables_) {
+ auto it = table.Find(GcRoot<mirror::String>(s));
+ if (it != table.end()) {
+ table.Erase(it);
+ return;
+ }
}
+ LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}
mirror::String* InternTable::Table::Find(mirror::String* s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != pre_zygote_table_.end()) {
- return it->Read();
- }
- it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
- if (it != post_zygote_table_.end()) {
- return it->Read();
+ for (UnorderedSet& table : tables_) {
+ auto it = table.Find(GcRoot<mirror::String>(s));
+ if (it != table.end()) {
+ return it->Read();
+ }
}
return nullptr;
}
-void InternTable::Table::SwapPostZygoteWithPreZygote() {
- if (pre_zygote_table_.Empty()) {
- std::swap(pre_zygote_table_, post_zygote_table_);
- VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
- } else {
- // This case happens if read the intern table from the image.
- VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
- }
+void InternTable::Table::AddNewTable() {
+ tables_.push_back(UnorderedSet());
}
void InternTable::Table::Insert(mirror::String* s) {
- // Always insert the post zygote table, this gets swapped when we create the zygote to be the
- // pre zygote table.
- post_zygote_table_.Insert(GcRoot<mirror::String>(s));
+ // Always insert into the last table; the image tables come before it and we avoid inserting
+ // into those to prevent dirty pages.
+ DCHECK(!tables_.empty());
+ tables_.back().Insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootInternedString));
- for (auto& intern : pre_zygote_table_) {
- buffered_visitor.VisitRoot(intern);
- }
- for (auto& intern : post_zygote_table_) {
- buffered_visitor.VisitRoot(intern);
+ for (UnorderedSet& table : tables_) {
+ for (auto& intern : table) {
+ buffered_visitor.VisitRoot(intern);
+ }
}
}
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
- SweepWeaks(&pre_zygote_table_, visitor);
- SweepWeaks(&post_zygote_table_, visitor);
+ for (UnorderedSet& table : tables_) {
+ SweepWeaks(&table, visitor);
+ }
}
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
@@ -446,7 +453,12 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
}
size_t InternTable::Table::Size() const {
- return pre_zygote_table_.Size() + post_zygote_table_.Size();
+ return std::accumulate(tables_.begin(),
+ tables_.end(),
+ 0U,
+ [](size_t sum, const UnorderedSet& set) {
+ return sum + set.Size();
+ });
}
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
@@ -464,10 +476,10 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
InternTable::Table::Table() {
Runtime* const runtime = Runtime::Current();
- pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
- post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
+ // Initial table.
+ tables_.push_back(UnorderedSet());
+ tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
}
} // namespace art
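
The multi-table scheme above can be modeled in isolation. This sketch uses std::string in place of GcRoot<mirror::String> and ignores locking, load factors, and the weak/strong split; MultiTable is a hypothetical name.

#include <string>
#include <unordered_set>
#include <vector>

// Model of InternTable::Table's tables_ vector: lookups scan every table,
// inserts only touch the back table, and AddNewTable() freezes everything
// before it.
class MultiTable {
 public:
  MultiTable() { tables_.emplace_back(); }
  void AddNewTable() { tables_.emplace_back(); }
  const std::string* Find(const std::string& s) const {
    for (const auto& table : tables_) {
      auto it = table.find(s);
      if (it != table.end()) {
        return &*it;
      }
    }
    return nullptr;
  }
  void Insert(const std::string& s) { tables_.back().insert(s); }
 private:
  std::vector<std::unordered_set<std::string>> tables_;
};

Because lookups walk the frozen front tables read-only, a forked zygote child never dirties those pages; only the back table's buckets can change.
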
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 3a4e8d8f11..8f715a3dc3 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -98,22 +98,20 @@ class InternTable {
void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
- // Adds all of the resolved image strings from the image space into the intern table. The
- // advantage of doing this is preventing expensive DexFile::FindStringId calls.
- void AddImageStringsToTable(gc::space::ImageSpace* image_space)
+ // Adds all of the resolved image strings from the image spaces into the intern table. The
+ // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
+ // images_added_to_intern_table_ to true.
+ void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
- // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
- void SwapPostZygoteWithPreZygote()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
-
- // Add an intern table which was serialized to the image.
- void AddImageInternTable(gc::space::ImageSpace* image_space)
+ // Add a new intern table to insert into; the previous intern tables are still there but are
+ // no longer inserted into and ideally remain unmodified. This is done to prevent dirty pages.
+ void AddNewTable()
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Read the intern table from memory. The elements aren't copied, the intern hash set data will
// point to somewhere within ptr. Only reads the strong interns.
- size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
+ size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Write the post zygote intern table to a pointer. Only writes the strong interns since it is
@@ -157,15 +155,17 @@ class InternTable {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_);
+ // Add a new intern table that will only be inserted into from now on.
+ void AddNewTable() REQUIRES(Locks::intern_table_lock_);
size_t Size() const REQUIRES(Locks::intern_table_lock_);
- // Read pre zygote table is called from ReadFromMemory which happens during runtime creation
- // when we load the image intern table. Returns how many bytes were read.
- size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
+ // Read and add an intern table from ptr.
+ // Tables read are inserted at the front of the table array. Checks for conflicts (see
+ // kCheckDuplicates). Returns how many bytes were read.
+ size_t AddTableFromMemory(const uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in
- // the post zygote table. Returns how many bytes were written.
- size_t WriteFromPostZygoteTable(uint8_t* ptr)
+ // Write the intern tables to ptr; if there are multiple tables, they are combined into a
+ // single one. Returns how many bytes were written.
+ size_t WriteToMemory(uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
private:
@@ -175,12 +175,9 @@ class InternTable {
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
- // caused by modifying the zygote intern table hash table. The pre zygote table are the
- // interned strings which were interned before we created the zygote space. Post zygote is self
- // explanatory.
- UnorderedSet pre_zygote_table_;
- UnorderedSet post_zygote_table_;
+ // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
+ // modifying the zygote intern table. The back table is modified when strings are interned.
+ std::vector<UnorderedSet> tables_;
};
// Insert if non null, otherwise return null. Must be called holding the mutator lock.
@@ -214,7 +211,7 @@ class InternTable {
void RemoveWeakFromTransaction(mirror::String* s)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- size_t ReadFromMemoryLocked(const uint8_t* ptr)
+ size_t AddTableFromMemoryLocked(const uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Change the weak root state. May broadcast to waiters.
@@ -225,7 +222,7 @@ class InternTable {
void WaitUntilAccessible(Thread* self)
REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
+ bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8d5a61a44b..e7b4731ef8 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -25,6 +25,7 @@
#include "ScopedLocalRef.h"
#include "stack.h"
#include "unstarted_runtime.h"
+#include "mterp/mterp.h"
namespace art {
namespace interpreter {
@@ -224,19 +225,33 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
enum InterpreterImplKind {
- kSwitchImpl, // Switch-based interpreter implementation.
- kComputedGotoImplKind // Computed-goto-based interpreter implementation.
+ kSwitchImplKind, // Switch-based interpreter implementation.
+ kComputedGotoImplKind, // Computed-goto-based interpreter implementation.
+ kMterpImplKind // Assembly interpreter
};
static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
- os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ os << ((rhs == kSwitchImplKind)
+ ? "Switch-based interpreter"
+ : (rhs == kComputedGotoImplKind)
+ ? "Computed-goto-based interpreter"
+ : "Asm interpreter");
return os;
}
#if !defined(__clang__)
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
+// TODO: remove when all targets are implemented.
+static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
+#else
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
+#endif
#else
// Clang 3.4 fails to build the goto interpreter implementation.
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
+static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
+#else
+static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
+#endif
template<bool do_access_check, bool transaction_active>
JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
@@ -263,18 +278,52 @@ static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFr
static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register) {
- DCHECK(shadow_frame.GetMethod()->IsInvokable());
+ DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
bool transaction_active = Runtime::Current()->IsActiveTransaction();
if (LIKELY(shadow_frame.GetMethod()->IsPreverified())) {
// Enter the "without access check" interpreter.
- if (kInterpreterImplKind == kSwitchImpl) {
+ if (kInterpreterImplKind == kMterpImplKind) {
if (transaction_active) {
- return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register);
+ // No Mterp variant - just use the switch interpreter.
+ return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register,
+ false);
} else {
- return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register);
+ const instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ while (true) {
+ if (instrumentation->IsActive() || !Runtime::Current()->IsStarted()) {
+ // TODO: allow JIT profiling instrumentation. For now, just punt on all instrumentation.
+#if !defined(__clang__)
+ return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
+#else
+ return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
+ false);
+#endif
+ }
+ bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register);
+ if (returned) {
+ return result_register;
+ } else {
+ // Mterp didn't like that instruction. Single-step it with the reference interpreter.
+ JValue res = ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame,
+ result_register, true);
+ if (shadow_frame.GetDexPC() == DexFile::kDexNoIndex) {
+ // Single-stepped a return or an exception not handled locally. Return to caller.
+ return res;
+ }
+ }
+ }
+ }
+ } else if (kInterpreterImplKind == kSwitchImplKind) {
+ if (transaction_active) {
+ return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register,
+ false);
+ } else {
+ return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
+ false);
}
} else {
DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
@@ -286,11 +335,22 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
}
} else {
// Enter the "with access check" interpreter.
- if (kInterpreterImplKind == kSwitchImpl) {
+ if (kInterpreterImplKind == kMterpImplKind) {
+ // No access check variants for Mterp. Just use the switch version.
+ if (transaction_active) {
+ return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register,
+ false);
+ } else {
+ return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
+ false);
+ }
+ } else if (kInterpreterImplKind == kSwitchImplKind) {
if (transaction_active) {
- return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register);
+ return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register,
+ false);
} else {
- return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register);
+ return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
+ false);
}
} else {
DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
@@ -501,5 +561,13 @@ void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* co
self->PopShadowFrame();
}
+void CheckInterpreterAsmConstants() {
+ CheckMterpAsmConstants();
+}
+
+void InitInterpreterTls(Thread* self) {
+ InitMterpTls(self);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 8e7f3da1ba..6353a9b7bf 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -50,6 +50,11 @@ void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* co
ShadowFrame* shadow_frame, JValue* result)
SHARED_REQUIRES(Locks::mutator_lock_);
+// One-time sanity check.
+void CheckInterpreterAsmConstants();
+
+void InitInterpreterTls(Thread* self);
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9f6699f730..932d255b0e 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -67,16 +67,21 @@ using ::art::mirror::Throwable;
namespace art {
namespace interpreter {
-// External references to both interpreter implementations.
+// External references to all interpreter implementations.
template<bool do_access_check, bool transaction_active>
extern JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template<bool do_access_check, bool transaction_active>
extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
+// Mterp does not support transactions or access checks, thus no templated versions.
+extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result_register);
+
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index c9831e67aa..bab0d40000 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -35,6 +35,9 @@ namespace interpreter {
/* Structured locking is to be enforced for abnormal termination, too. */ \
shadow_frame.GetLockCountData(). \
CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
+ if (interpret_one_instruction) { \
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
+ } \
return JValue(); /* Handled in caller. */ \
} else { \
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
@@ -78,7 +81,8 @@ static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction) {
constexpr bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
@@ -105,7 +109,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
// to keep this live for the scope of the entire function call.
std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
size_t lambda_captured_variable_index = 0;
- while (true) {
+ do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
TraceExecution(shadow_frame, inst, dex_pc);
@@ -203,6 +207,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_VOID: {
@@ -216,6 +223,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN: {
@@ -230,6 +240,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_WIDE: {
@@ -243,6 +256,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_OBJECT: {
@@ -278,6 +294,9 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::CONST_4: {
@@ -2370,22 +2389,29 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::UNUSED_7A:
UnexpectedOpcode(inst, shadow_frame);
}
- }
+ } while (!interpret_one_instruction);
+ // Record where we stopped.
+ shadow_frame.SetDexPC(inst->GetDexPc(insns));
+ return JValue();
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteSwitchImpl.
template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/mterp/Makefile_mterp b/runtime/interpreter/mterp/Makefile_mterp
new file mode 100644
index 0000000000..f0c30ad56c
--- /dev/null
+++ b/runtime/interpreter/mterp/Makefile_mterp
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Makefile for the Art fast interpreter. This is not currently
+# integrated into the build system.
+#
+
+SHELL := /bin/sh
+
+# Build system has TARGET_ARCH=arm, but we can support the exact architecture
+# if it is worthwhile.
+#
+# To generate sources:
+# for arch in arm arm64 x86 x86_64 mips mips64
+# do
+# TARGET_ARCH_EXT=$arch make -f Makefile_mterp
+# done
+#
+
+OUTPUT_DIR := out
+
+# Accumulate all possible dependencies for the generated files in a very
+# conservative fashion. If it's not one of the generated files in "out",
+# assume it's a dependency.
+SOURCE_DEPS := \
+ $(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print) \
+
+# Source files generated by the script. There's always one C and one
+# assembly file, though in practice one or the other could be empty.
+GEN_SOURCES := \
+ $(OUTPUT_DIR)/interp_asm_$(TARGET_ARCH_EXT).S
+
+target: $(GEN_SOURCES)
+
+$(GEN_SOURCES): $(SOURCE_DEPS)
+ @mkdir -p $(OUTPUT_DIR)
+ ./gen_mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)
diff --git a/runtime/interpreter/mterp/README.txt b/runtime/interpreter/mterp/README.txt
new file mode 100644
index 0000000000..19e02bec50
--- /dev/null
+++ b/runtime/interpreter/mterp/README.txt
@@ -0,0 +1,197 @@
+rt "mterp" README
+
+NOTE: Find rebuilding instructions at the bottom of this file.
+
+
+==== Overview ====
+
+Every configuration has a "config-*" file that controls how the sources
+are generated. The sources are written into the "out" directory, where
+they are picked up by the Android build system.
+
+The best way to become familiar with the interpreter is to look at the
+generated files in the "out" directory.
+
+
+==== Config file format ====
+
+The config files are parsed from top to bottom. Each line in the file
+may be blank, hold a comment (line starts with '#'), or be a command.
+
+The commands are:
+
+ handler-style <computed-goto|jump-table>
+
+ Specify which style of interpreter to generate. In computed-goto,
+ each handler is allocated a fixed region, allowing transitions to
+ be done via table-start-address + (opcode * handler-size). With
+ jump-table style, handlers may be of any length, and the generated
+ table is an array of pointers to the handlers. This command is required,
+ and must be the first command in the config file.
+
+ handler-size <bytes>
+
+ Specify the size of the fixed region, in bytes. On most platforms
+ this will need to be a power of 2. For jump-table implementations,
+ this command is ignored.
+
+ import <filename>
+
+ The specified file is included immediately, in its entirety. No
+ substitutions are performed. ".cpp" and ".h" files are copied to the
+ C output, ".S" files are copied to the asm output.
+
+ asm-alt-stub <filename>
+
+ When present, this command will cause the generation of an alternate
+ set of entry points (for computed-goto interpreters) or an alternate
+ jump table (for jump-table interpreters).
+
+ fallback-stub <filename>
+
+ Specifies a file to be used for the special FALLBACK tag on the "op"
+ command below. Intended to be used to transfer control to an alternate
+ interpreter to single-step a not-yet-implemented opcode. Note: it should
+ not be used on RETURN-class instructions.
+
+ op-start <directory>
+
+ Indicates the start of the opcode list. Must precede any "op"
+ commands. The specified directory is the default location to pull
+ instruction files from.
+
+ op <opcode> <directory>|FALLBACK
+
+ Can only appear after "op-start" and before "op-end". Overrides the
+ default source file location of the specified opcode. The opcode
+ definition will come from the specified file, e.g. "op OP_NOP arm"
+ will load from "arm/OP_NOP.S". A substitution dictionary will be
+ applied (see below). If the special "FALLBACK" token is used instead of
+ a directory name, the source file specified in fallback-stub will instead
+ be used for this opcode.
+
+ alt <opcode> <directory>
+
+ Can only appear after "op-start" and before "op-end". Similar to the
+ "op" command above, but denotes a source file to override the entry
+ in the alternate handler table. The opcode definition will come from
+ the specified file, e.g. "alt OP_NOP arm" will load from
+ "arm/ALT_OP_NOP.S". A substitution dictionary will be applied
+ (see below).
+
+ op-end
+
+ Indicates the end of the opcode list. All kNumPackedOpcodes
+ opcodes are emitted when this is seen, followed by any code that
+ didn't fit inside the fixed-size instruction handler space.
+
+The order of "op" and "alt" directives is not significant; the generation
+tool will extract ordering info from the VM sources.
+
+Typically, the directory that holds the current implementations of most
+opcodes is the one named in the "op-start" directive.
+
+==== Instruction file format ====
+
+The assembly instruction files are simply fragments of assembly sources.
+The starting label will be provided by the generation tool, as will
+declarations for the segment type and alignment. The expected target
+assembler is GNU "as", but others will work (may require fiddling with
+some of the pseudo-ops emitted by the generation tool).
+
+A substitution dictionary is applied to all opcode fragments as they are
+appended to the output. Substitutions can look like "$value" or "${value}".
+
+The dictionary always includes:
+
+ $opcode - opcode name, e.g. "OP_NOP"
+ $opnum - opcode number, e.g. 0 for OP_NOP
+ $handler_size_bytes - max size of an instruction handler, in bytes
+ $handler_size_bits - max size of an instruction handler, log 2
+
+Both C and assembly sources will be passed through the C pre-processor,
+so you can take advantage of C-style comments and preprocessor directives
+like "#define".
+
+Some generator operations are available.
+
+ %include "filename" [subst-dict]
+
+ Includes the file, which should look like "arm/OP_NOP.S". You can
+ specify values for the substitution dictionary, using standard Python
+ syntax. For example, this:
+ %include "arm/unop.S" {"result":"r1"}
+ would insert "arm/unop.S" at the current file position, replacing
+ occurrences of "$result" with "r1".
+
+ %default <subst-dict>
+
+ Specify default substitution dictionary values, using standard Python
+ syntax. Useful if you want to have a "base" version and variants (see the
+ sketch just after this list).
+
+ %break
+
+ Identifies the split between the main portion of the instruction
+ handler (which must fit in "handler-size" bytes) and the "sister"
+ code, which is appended to the end of the instruction handler block.
+ In jump table implementations, %break is ignored.
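+
+As a concrete example from this change, the entire add-int opcode is the
+one-line fragment
+
+    %include "arm/binop.S" {"instr":"add r0, r0, r1"}
+
+where "arm/binop.S" opens with %default {"preinstr":"", "result":"r0",
+"chkzero":"0"}, so each including fragment only supplies the fields that
+differ from the defaults.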
+
+The generation tool does *not* print a warning if your instructions
+exceed "handler-size", but the VM will abort on startup if it detects an
+oversized handler. On architectures with fixed-width instructions this
+is easy to work with; on others you will need to count bytes.
+
+
+==== Using C constants from assembly sources ====
+
+The file "art/runtime/asm_support.h" has some definitions for constant
+values, structure sizes, and struct member offsets. The format is fairly
+restricted, as simple macros are used to massage it for use with both C
+(where it is verified) and assembly (where the definitions are used).
+
+If a constant in the file becomes out of sync, the VM will log an error
+message and abort during startup.
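+
+Schematically, an entry pairs a plain #define (usable from assembly) with a
+compile-time check on the C++ side. The entry below is representative of the
+pattern rather than quoted from the file:
+
+    #define THREAD_FLAGS_OFFSET 0
+    ADD_TEST_EQ(THREAD_FLAGS_OFFSET,
+                art::Thread::ThreadFlagsOffset<__SIZEOF_POINTER__>().Int32Value())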
+
+
+==== Development tips ====
+
+If you need to debug the initial piece of an opcode handler, and your
+debug code expands it beyond the handler size limit, you can insert a
+generic header at the top:
+
+ b ${opcode}_start
+%break
+${opcode}_start:
+
+If you already have a %break, it's okay to leave it in place -- the second
+%break is ignored.
+
+
+==== Rebuilding ====
+
+If you change any of the source file fragments, you need to rebuild the
+combined source files in the "out" directory. Make sure the files in
+"out" are editable, then:
+
+ $ cd mterp
+ $ ./rebuild.sh
+
+The ultimate goal is to have the build system generate the necessary
+output files without requiring this separate step, but we're not yet
+ready to require Python in the build.
+
+==== Interpreter Control ====
+
+The mterp fast interpreter achieves much of its performance advantage
+over the C++ interpreter through its efficient mechanism of
+transitioning from one Dalvik bytecode to the next. Mterp for ARM targets
+uses a computed-goto mechanism, in which the handler entrypoints are
+located at the base of the handler table + (opcode * 128).
+
+In normal operation, the dedicated register rIBASE
+(r8 for ARM, edx for x86) holds a pointer to mainHandlerTable. If we need
+to switch to a mode that requires inter-instruction checking, rIBASE is
+changed to altHandlerTable. Note that this change is not immediate: what is
+actually changed is the value of curHandlerTable, which is part of the
+interpBreak structure. Rather than explicitly checking for changes, each
+thread blindly refreshes rIBASE at backward branches, exception throws and
+returns.
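+
+For example, the ARM handlers in this change refresh rIBASE with a single
+load from the thread structure (see MterpCheckSuspendAndContinue in
+arm/footer.S):
+
+    ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE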
diff --git a/runtime/interpreter/mterp/arm/alt_stub.S b/runtime/interpreter/mterp/arm/alt_stub.S
new file mode 100644
index 0000000000..92ae0c6081
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/alt_stub.S
@@ -0,0 +1,12 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (${opnum} * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
diff --git a/runtime/interpreter/mterp/arm/bincmp.S b/runtime/interpreter/mterp/arm/bincmp.S
new file mode 100644
index 0000000000..474bc3c276
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/bincmp.S
@@ -0,0 +1,36 @@
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ mov${revcmp} r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ mov${revcmp} r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/binop.S b/runtime/interpreter/mterp/arm/binop.S
new file mode 100644
index 0000000000..eeb72ef65b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binop.S
@@ -0,0 +1,35 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/binop2addr.S b/runtime/interpreter/mterp/arm/binop2addr.S
new file mode 100644
index 0000000000..d09a43ae48
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binop2addr.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit16.S b/runtime/interpreter/mterp/arm/binopLit16.S
new file mode 100644
index 0000000000..065394e4ef
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopLit16.S
@@ -0,0 +1,29 @@
+%default {"result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
new file mode 100644
index 0000000000..ec0b3c445d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopLit8.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if $chkzero
+ @cmp r1, #0 @ is second operand zero? (Z flag was set by the movs above)
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
new file mode 100644
index 0000000000..57d43c651a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopWide.S
@@ -0,0 +1,38 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
new file mode 100644
index 0000000000..4e855f2d16
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopWide2addr.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
new file mode 100644
index 0000000000..4c5ffc5b4e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+
+/*
+ * On entry:
+ * r0 Thread* self
+ * r1 code_item
+ * r2 ShadowFrame
+ * r3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .fnstart
+ .save {r4-r10,fp,lr}
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+ .pad #4
+ sub sp, sp, #4 @ align stack to 64 bits
+
+ /* Remember the return register */
+ str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov rSELF, r0
+ ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+ add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs of the shadow frame.
+ add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
+ add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
+ add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST @ load rINST from rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+ /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm/fallback.S b/runtime/interpreter/mterp/arm/fallback.S
new file mode 100644
index 0000000000..44e7e1220d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fallback.S
@@ -0,0 +1,3 @@
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
diff --git a/runtime/interpreter/mterp/arm/fbinop.S b/runtime/interpreter/mterp/arm/fbinop.S
new file mode 100644
index 0000000000..594ee032d1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinop.S
@@ -0,0 +1,23 @@
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S
new file mode 100644
index 0000000000..b052a29a88
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinop2addr.S
@@ -0,0 +1,21 @@
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
new file mode 100644
index 0000000000..1bed817824
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinopWide.S
@@ -0,0 +1,23 @@
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $instr @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
new file mode 100644
index 0000000000..9f56986db8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ $instr @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
new file mode 100644
index 0000000000..617f572c0f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/footer.S
@@ -0,0 +1,167 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ cmp r0, #0 @ Exception pending?
+ beq MterpFallback @ If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+MterpException:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException @ (self, shadow_frame)
+ cmp r0, #0
+ beq MterpExceptionReturn @ no local catch, back to caller.
+ ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r1, [rFP, #OFF_FP_DEX_PC]
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add rPC, r0, #CODEITEM_INSNS_OFFSET
+ add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a suspend request. Assumes rINST is already loaded and rPC advanced; we
+ * still need to extract the opcode and branch to it. The thread flags are in lr.
+ */
+MterpCheckSuspendAndContinue:
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ EXPORT_PC
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov r0, #0 @ signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov r0, #1 @ signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ str r0, [r2]
+ str r1, [r2, #4]
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ mov r0, #1 @ signal return to caller.
+MterpDone:
+ add sp, sp, #4 @ undo 64-bit stack alignment
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+ .fnend
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S
new file mode 100644
index 0000000000..d7a0859cdc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funop.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S
new file mode 100644
index 0000000000..9daec28556
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funopNarrower.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
new file mode 100644
index 0000000000..087a1f2faf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funopWider.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
new file mode 100644
index 0000000000..14319d953f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke; allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate the shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+ r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+#define rREFS r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/*
+ *
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+.macro EXPORT_DEX_PC tmp
+ ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ add \tmp, #CODEITEM_INSNS_OFFSET
+ sub \tmp, rPC, \tmp
+ asr \tmp, #1
+ str \tmp, [rFP, #OFF_FP_DEX_PC]
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ ldrh rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ add rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ ldrh rINST, [rPC, \reg]!
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [rPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+ add pc, rIBASE, \reg, lsl #${handler_size_bits}
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add pc, \base, \reg, lsl #${handler_size_bits}
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [rFP, \vreg, lsl #2]
+ mov \reg, #0
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [rFP, \vreg, lsl #2]
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
new file mode 100644
index 0000000000..7575865f1b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern $helper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl $helper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
diff --git a/runtime/interpreter/mterp/arm/op_add_double.S b/runtime/interpreter/mterp/arm/op_add_double.S
new file mode 100644
index 0000000000..9332bf2005
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_double_2addr.S b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
new file mode 100644
index 0000000000..3242c53f65
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float.S b/runtime/interpreter/mterp/arm/op_add_float.S
new file mode 100644
index 0000000000..afb7967eb7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float_2addr.S b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
new file mode 100644
index 0000000000..0067b6a010
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int.S b/runtime/interpreter/mterp/arm/op_add_int.S
new file mode 100644
index 0000000000..1dcae7eab3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_2addr.S b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
new file mode 100644
index 0000000000..9ea98f1928
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit16.S b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
new file mode 100644
index 0000000000..5763ab849b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit8.S b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
new file mode 100644
index 0000000000..b84684a3a3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long.S b/runtime/interpreter/mterp/arm/op_add_long.S
new file mode 100644
index 0000000000..093223e755
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long_2addr.S b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
new file mode 100644
index 0000000000..c11e0aff44
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aget.S b/runtime/interpreter/mterp/arm/op_aget.S
new file mode 100644
index 0000000000..11f7079c3f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget.S
@@ -0,0 +1,29 @@
+%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_boolean.S b/runtime/interpreter/mterp/arm/op_aget_boolean.S
new file mode 100644
index 0000000000..8f678dc14f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_byte.S b/runtime/interpreter/mterp/arm/op_aget_byte.S
new file mode 100644
index 0000000000..a304650688
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_char.S b/runtime/interpreter/mterp/arm/op_aget_char.S
new file mode 100644
index 0000000000..490830620e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_char.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_object.S b/runtime/interpreter/mterp/arm/op_aget_object.S
new file mode 100644
index 0000000000..4e0aab5d13
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_object.S
@@ -0,0 +1,21 @@
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ bl artAGetObjectFromMterp @ (array, index)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0
+ bne MterpException
+ SET_VREG_OBJECT r0, r9
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_short.S b/runtime/interpreter/mterp/arm/op_aget_short.S
new file mode 100644
index 0000000000..b71e659a4a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_short.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
new file mode 100644
index 0000000000..caaec71d02
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -0,0 +1,24 @@
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_and_int.S b/runtime/interpreter/mterp/arm/op_and_int.S
new file mode 100644
index 0000000000..7c16d376be
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_2addr.S b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
new file mode 100644
index 0000000000..0fbab02863
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit16.S b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
new file mode 100644
index 0000000000..541e9b7814
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit8.S b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
new file mode 100644
index 0000000000..d5783e52e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long.S b/runtime/interpreter/mterp/arm/op_and_long.S
new file mode 100644
index 0000000000..4ad5158da7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long_2addr.S b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
new file mode 100644
index 0000000000..e23ea447ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aput.S b/runtime/interpreter/mterp/arm/op_aput.S
new file mode 100644
index 0000000000..a511fa59e0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput.S
@@ -0,0 +1,29 @@
+%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ $store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_boolean.S b/runtime/interpreter/mterp/arm/op_aput_boolean.S
new file mode 100644
index 0000000000..e86663f199
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_byte.S b/runtime/interpreter/mterp/arm/op_aput_byte.S
new file mode 100644
index 0000000000..83694b788d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_char.S b/runtime/interpreter/mterp/arm/op_aput_char.S
new file mode 100644
index 0000000000..3551cace33
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_char.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_object.S b/runtime/interpreter/mterp/arm/op_aput_object.S
new file mode 100644
index 0000000000..c5399163e3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_object.S
@@ -0,0 +1,14 @@
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpAputObject
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_short.S b/runtime/interpreter/mterp/arm/op_aput_short.S
new file mode 100644
index 0000000000..0a0590ec36
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_short.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S
new file mode 100644
index 0000000000..49839d1b24
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_wide.S
@@ -0,0 +1,24 @@
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_array_length.S b/runtime/interpreter/mterp/arm/op_array_length.S
new file mode 100644
index 0000000000..43b1682a9d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_array_length.S
@@ -0,0 +1,13 @@
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r3, r2 @ vB<- length
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_check_cast.S b/runtime/interpreter/mterp/arm/op_check_cast.S
new file mode 100644
index 0000000000..3e3ac707c4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_check_cast.S
@@ -0,0 +1,17 @@
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ GET_VREG r1, r1 @ r1<- object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpCheckCast @ (index, obj, method, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S
new file mode 100644
index 0000000000..2b4c0ea5bb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmp_long.S
@@ -0,0 +1,56 @@
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .L${opcode}_less @ signed compare on high part
+ bgt .L${opcode}_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .L${opcode}_greater @ unsigned compare on low part
+ bne .L${opcode}_less
+ b .L${opcode}_finish @ equal; r1 already holds 0
+%break
+
+.L${opcode}_less:
+ mvn r1, #0 @ r1<- -1
+ @ We would like to conditionalize the next mov to avoid the branch, but
+ @ no clean way presents itself; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.L${opcode}_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.L${opcode}_finish:
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
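The comment's multi-compare strategy in C: a signed comparison settles it on the high words, and only a tie falls through to an unsigned comparison of the low words. A sketch:

    #include <stdint.h>

    /* cmp-long: -1, 0, or 1, matching the handler's three exits. */
    static int cmp_long(int64_t a, int64_t b) {
        int32_t  ahi = (int32_t)(a >> 32), bhi = (int32_t)(b >> 32);
        uint32_t alo = (uint32_t)a,        blo = (uint32_t)b;
        if (ahi < bhi) return -1;   /* blt: signed high-word compare */
        if (ahi > bhi) return 1;    /* bgt */
        if (alo < blo) return -1;   /* low words compare unsigned */
        if (alo > blo) return 1;    /* bhi */
        return 0;                   /* equal: r1 already holds 0 */
    }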
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_double.S b/runtime/interpreter/mterp/arm/op_cmpg_double.S
new file mode 100644
index 0000000000..4b05c44beb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpg_double.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_float.S b/runtime/interpreter/mterp/arm/op_cmpg_float.S
new file mode 100644
index 0000000000..d5d2df2ef5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpg_float.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_double.S b/runtime/interpreter/mterp/arm/op_cmpl_double.S
new file mode 100644
index 0000000000..6ee53b301e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpl_double.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_float.S b/runtime/interpreter/mterp/arm/op_cmpl_float.S
new file mode 100644
index 0000000000..64535b68ae
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpl_float.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
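The four floating-point compares above differ only in their NaN bias: the cmpg variants preload +1 (mov r0, #1) and the cmpl variants preload -1 (mvn r0, #0), so an unordered comparison falls through to that default. The same logic in C, relying on IEEE comparisons with a NaN operand being false:

    /* cmpg: an unordered (NaN) operand yields the +1 default. */
    static int cmpg_float(float x, float y) {
        if (x == y) return 0;
        if (x < y)  return -1;
        return 1;               /* x > y, or NaN */
    }

    /* cmpl: an unordered (NaN) operand yields the -1 default. */
    static int cmpl_float(float x, float y) {
        if (x == y) return 0;
        if (x > y)  return 1;
        return -1;              /* x < y, or NaN */
    }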
diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S
new file mode 100644
index 0000000000..de3e3c3c88
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const.S
@@ -0,0 +1,9 @@
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S
new file mode 100644
index 0000000000..59c6dac10a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_16.S
@@ -0,0 +1,7 @@
+ /* const/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S
new file mode 100644
index 0000000000..c177bb9eb3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_4.S
@@ -0,0 +1,8 @@
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+ GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_class.S b/runtime/interpreter/mterp/arm/op_const_class.S
new file mode 100644
index 0000000000..0b111f4d06
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_class.S
@@ -0,0 +1,13 @@
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S
new file mode 100644
index 0000000000..460d546f3b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_high16.S
@@ -0,0 +1,8 @@
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_string.S b/runtime/interpreter/mterp/arm/op_const_string.S
new file mode 100644
index 0000000000..4b8302a9ec
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_string.S
@@ -0,0 +1,13 @@
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
new file mode 100644
index 0000000000..1a3d0b2542
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r2, 2 @ r2<- BBBB (high)
+ mov r1, rINST, lsr #8 @ r1<- AA
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 3 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
new file mode 100644
index 0000000000..2cdc4261e6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide.S
@@ -0,0 +1,13 @@
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (low middle)
+ FETCH r2, 3 @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH r3, 4 @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
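The wide constant is assembled from four little-endian 16-bit code units exactly as the orr/lsl pairs above do. A sketch, assuming a hypothetical fetch(n) that returns the zero-extended unit n past the opcode:

    #include <stdint.h>

    extern uint16_t fetch(int n);   /* hypothetical FETCH equivalent */

    static int64_t const_wide(void) {
        uint32_t lo = fetch(1) | ((uint32_t)fetch(2) << 16);  /* BBBBbbbb */
        uint32_t hi = fetch(3) | ((uint32_t)fetch(4) << 16);  /* HHHHhhhh */
        return (int64_t)(((uint64_t)hi << 32) | lo);
    }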
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
new file mode 100644
index 0000000000..56bfc17a91
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S
@@ -0,0 +1,9 @@
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
new file mode 100644
index 0000000000..36d4628502
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S
@@ -0,0 +1,11 @@
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH r0, 1 @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S r2, 2 @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
new file mode 100644
index 0000000000..bee592d16b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
@@ -0,0 +1,10 @@
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_div_double.S b/runtime/interpreter/mterp/arm/op_div_double.S
new file mode 100644
index 0000000000..5147550b97
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_double_2addr.S b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
new file mode 100644
index 0000000000..b812f17ac9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float.S b/runtime/interpreter/mterp/arm/op_div_float.S
new file mode 100644
index 0000000000..0f24d11e54
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float_2addr.S b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
new file mode 100644
index 0000000000..a1dbf01713
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_int.S b/runtime/interpreter/mterp/arm/op_div_int.S
new file mode 100644
index 0000000000..251064be0d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int.S
@@ -0,0 +1,30 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
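Control flow of the division handlers in C: the zero check always runs before any divide, and the #ifdef only chooses how the quotient is produced (ARMv7 sdiv versus the __aeabi_idiv software helper); both yield the truncated quotient. Sketch:

    #include <stdint.h>

    /* Returns 0 on success, nonzero for the divide-by-zero path. */
    static int div_int(int32_t vBB, int32_t vCC, int32_t *out) {
        if (vCC == 0)
            return -1;        /* beq common_errDivideByZero */
        /* Note: INT32_MIN / -1 wraps on the hardware path but is
         * undefined behavior in portable C; guard it in real code. */
        *out = vBB / vCC;     /* sdiv, or bl __aeabi_idiv */
        return 0;
    }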
diff --git a/runtime/interpreter/mterp/arm/op_div_int_2addr.S b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
new file mode 100644
index 0000000000..9be4cd8b14
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
@@ -0,0 +1,29 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit16.S b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
new file mode 100644
index 0000000000..d9bc7d65ce
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
@@ -0,0 +1,28 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit8.S b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
new file mode 100644
index 0000000000..5d2dbd3ecb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
@@ -0,0 +1,29 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @cmp r1, #0 @ is second operand zero? (movs above set the flags)
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_long.S b/runtime/interpreter/mterp/arm/op_div_long.S
new file mode 100644
index 0000000000..0f21a845d4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_long_2addr.S b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
new file mode 100644
index 0000000000..e172b29496
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_float.S b/runtime/interpreter/mterp/arm/op_double_to_float.S
new file mode 100644
index 0000000000..e327000409
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_float.S
@@ -0,0 +1 @@
+%include "arm/funopNarrower.S" {"instr":"fcvtsd s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_int.S b/runtime/interpreter/mterp/arm/op_double_to_int.S
new file mode 100644
index 0000000000..aa035de63a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_int.S
@@ -0,0 +1 @@
+%include "arm/funopNarrower.S" {"instr":"ftosizd s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_long.S b/runtime/interpreter/mterp/arm/op_double_to_long.S
new file mode 100644
index 0000000000..b10081052e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_long.S
@@ -0,0 +1,52 @@
+@include "arm/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+%include "arm/unopWide.S" {"instr":"bl d2l_doconv"}
+
+%break
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
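d2l_doconv is a saturating conversion: values at or beyond the long range clamp to the extremes, NaN (which compares unequal to itself) becomes zero, and everything else converts normally. The same logic in C:

    #include <stdint.h>
    #include <math.h>

    static int64_t d2l(double d) {
        if (d >= 0x1p63)        /* >= maxlong; 0x43e00000 high word */
            return INT64_MAX;
        if (d <= -0x1p63)       /* <= minlong; 0xc3e00000 high word */
            return INT64_MIN;
        if (isnan(d))           /* arg != arg */
            return 0;
        return (int64_t)d;      /* in range: ordinary conversion */
    }

The float-to-long helper further down (f2l_doconv) follows the identical pattern with single-precision bounds.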
diff --git a/runtime/interpreter/mterp/arm/op_fill_array_data.S b/runtime/interpreter/mterp/arm/op_fill_array_data.S
new file mode 100644
index 0000000000..e1ca85c866
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_fill_array_data.S
@@ -0,0 +1,14 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG r0, r3 @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData @ (obj, payload)
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq MterpPossibleException @ exception?
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array.S b/runtime/interpreter/mterp/arm/op_filled_new_array.S
new file mode 100644
index 0000000000..1075f0c683
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_filled_new_array.S
@@ -0,0 +1,19 @@
+%default { "helper":"MterpFilledNewArray" }
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl $helper
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
new file mode 100644
index 0000000000..16567af567
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "arm/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm/op_float_to_double.S b/runtime/interpreter/mterp/arm/op_float_to_double.S
new file mode 100644
index 0000000000..fb1892b6d0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_double.S
@@ -0,0 +1 @@
+%include "arm/funopWider.S" {"instr":"fcvtds d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_int.S b/runtime/interpreter/mterp/arm/op_float_to_int.S
new file mode 100644
index 0000000000..aab87167bb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_int.S
@@ -0,0 +1 @@
+%include "arm/funop.S" {"instr":"ftosizs s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_long.S b/runtime/interpreter/mterp/arm/op_float_to_long.S
new file mode 100644
index 0000000000..24416d33d2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_long.S
@@ -0,0 +1,39 @@
+@include "arm/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+%include "arm/unopWider.S" {"instr":"bl f2l_doconv"}
+
+%break
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
diff --git a/runtime/interpreter/mterp/arm/op_goto.S b/runtime/interpreter/mterp/arm/op_goto.S
new file mode 100644
index 0000000000..9b3632aba2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto.S
@@ -0,0 +1,28 @@
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ @ If backwards branch refresh rIBASE
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ @ If backwards branch refresh rIBASE
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/op_goto_16.S b/runtime/interpreter/mterp/arm/op_goto_16.S
new file mode 100644
index 0000000000..2231acdb9e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto_16.S
@@ -0,0 +1,23 @@
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/op_goto_32.S b/runtime/interpreter/mterp/arm/op_goto_32.S
new file mode 100644
index 0000000000..6b72ff5ce2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto_32.S
@@ -0,0 +1,32 @@
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
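Across the three goto widths the pattern is constant: double the signed code-unit offset to a byte offset, and run a suspend check on backward branches. Only goto/32 may legally branch to itself, which is why it tests <= 0 where the others test < 0. A compact C model:

    #include <stdint.h>

    extern void check_suspend(void);   /* MterpCheckSuspendAndContinue */

    static const uint16_t *do_goto(const uint16_t *pc, int32_t cu_offset,
                                   int is_goto32) {
        int32_t byte_offset = cu_offset * 2;          /* adds r1, r0, r0 */
        int backward = is_goto32 ? (byte_offset <= 0) /* self-branch allowed */
                                 : (byte_offset < 0); /* bmi path */
        if (backward)
            check_suspend();
        return (const uint16_t *)((const uint8_t *)pc + byte_offset);
    }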
diff --git a/runtime/interpreter/mterp/arm/op_if_eq.S b/runtime/interpreter/mterp/arm/op_if_eq.S
new file mode 100644
index 0000000000..568568662f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_eq.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_eqz.S b/runtime/interpreter/mterp/arm/op_if_eqz.S
new file mode 100644
index 0000000000..2a9c0f9e17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_eqz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ge.S b/runtime/interpreter/mterp/arm/op_if_ge.S
new file mode 100644
index 0000000000..60a0307a15
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ge.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gez.S b/runtime/interpreter/mterp/arm/op_if_gez.S
new file mode 100644
index 0000000000..981cdec0fa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gt.S b/runtime/interpreter/mterp/arm/op_if_gt.S
new file mode 100644
index 0000000000..ca50cd7561
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gt.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gtz.S b/runtime/interpreter/mterp/arm/op_if_gtz.S
new file mode 100644
index 0000000000..c621812439
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gtz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_le.S b/runtime/interpreter/mterp/arm/op_if_le.S
new file mode 100644
index 0000000000..7e060f2fc9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_le.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lez.S b/runtime/interpreter/mterp/arm/op_if_lez.S
new file mode 100644
index 0000000000..f92be23717
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_lez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lt.S b/runtime/interpreter/mterp/arm/op_if_lt.S
new file mode 100644
index 0000000000..213344d809
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_lt.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ltz.S b/runtime/interpreter/mterp/arm/op_if_ltz.S
new file mode 100644
index 0000000000..dfd4e44856
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ltz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ne.S b/runtime/interpreter/mterp/arm/op_if_ne.S
new file mode 100644
index 0000000000..4a58b4aba0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ne.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_nez.S b/runtime/interpreter/mterp/arm/op_if_nez.S
new file mode 100644
index 0000000000..d864ef437b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_nez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
new file mode 100644
index 0000000000..c7f777b71e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl $helper
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if $is_object
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
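The ordering in the generic iget matters: the helper runs first, then the thread's exception slot is tested, and only a clean result is committed to fp[A]. A C sketch with hypothetical, simplified names (the real artGet32InstanceFromCode takes the same four arguments the handler marshals):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { void *exception; } Thread;   /* stand-in for rSELF */
    extern uint32_t get32_instance(uint32_t field_ref, void *obj,
                                   void *referrer, Thread *self);

    static int iget32(uint32_t field_ref, void *obj, void *referrer,
                      Thread *self, uint32_t *vreg_a) {
        uint32_t value = get32_instance(field_ref, obj, referrer, self);
        if (self->exception != NULL)   /* THREAD_EXCEPTION_OFFSET load */
            return -1;                 /* bne MterpPossibleException */
        *vreg_a = value;               /* SET_VREG only after the check */
        return 0;
    }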
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
new file mode 100644
index 0000000000..628f40a7e5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
new file mode 100644
index 0000000000..0ae4843595
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
new file mode 100644
index 0000000000..c4e08e2ca2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
new file mode 100644
index 0000000000..e1b3083404
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
new file mode 100644
index 0000000000..5e8da66a93
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_char.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char_quick.S b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
new file mode 100644
index 0000000000..b44d8f14d8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
new file mode 100644
index 0000000000..1cf2e3cb8d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_object.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object_quick.S b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
new file mode 100644
index 0000000000..fe29106d47
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
@@ -0,0 +1,17 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ cmp r0, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_quick.S b/runtime/interpreter/mterp/arm/op_iget_quick.S
new file mode 100644
index 0000000000..0eaf364f6b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"ldr" }
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ $load r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
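The -quick variants skip field resolution entirely: quickening has already rewritten the instruction so its 16-bit operand is a byte offset into the object, leaving just a null check and a load. Sketch:

    #include <stdint.h>
    #include <string.h>

    static int iget_quick(const uint8_t *obj, uint16_t byte_offset,
                          int32_t *out) {
        if (obj == NULL)
            return -1;                 /* common_errNullObject */
        memcpy(out, obj + byte_offset, sizeof(*out));  /* $load r0, [r3, r1] */
        return 0;
    }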
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
new file mode 100644
index 0000000000..460f0450ca
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_short.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short_quick.S b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
new file mode 100644
index 0000000000..1831b99ac3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
new file mode 100644
index 0000000000..f8d2f41dea
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -0,0 +1,22 @@
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet64InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpException @ bail out
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
new file mode 100644
index 0000000000..4d6976e085
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
@@ -0,0 +1,13 @@
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH ip, 1 @ ip<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0/r1<- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S
new file mode 100644
index 0000000000..e94108ca06
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_instance_of.S
@@ -0,0 +1,24 @@
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * The most common situation is a non-null object being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- vB (object)
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ mov r9, rINST, lsr #8 @ r9<- A+
+ and r9, r9, #15 @ r9<- A
+ bl MterpInstanceOf @ (index, obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+ ADVANCE 2 @ advance rPC
+ SET_VREG r0, r9 @ vA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_int_to_byte.S b/runtime/interpreter/mterp/arm/op_int_to_byte.S
new file mode 100644
index 0000000000..059d5c2cf5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"sxtb r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_char.S b/runtime/interpreter/mterp/arm/op_int_to_char.S
new file mode 100644
index 0000000000..83a0c196d6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_char.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"uxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_double.S b/runtime/interpreter/mterp/arm/op_int_to_double.S
new file mode 100644
index 0000000000..810c2e49bb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_double.S
@@ -0,0 +1 @@
+%include "arm/funopWider.S" {"instr":"fsitod d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_float.S b/runtime/interpreter/mterp/arm/op_int_to_float.S
new file mode 100644
index 0000000000..f41654c678
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_float.S
@@ -0,0 +1 @@
+%include "arm/funop.S" {"instr":"fsitos s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_long.S b/runtime/interpreter/mterp/arm/op_int_to_long.S
new file mode 100644
index 0000000000..b5aed8e056
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_long.S
@@ -0,0 +1 @@
+%include "arm/unopWider.S" {"instr":"mov r1, r0, asr #31"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_short.S b/runtime/interpreter/mterp/arm/op_int_to_short.S
new file mode 100644
index 0000000000..717bd96bd1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_short.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"sxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct.S b/runtime/interpreter/mterp/arm/op_invoke_direct.S
new file mode 100644
index 0000000000..1edf221914
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
new file mode 100644
index 0000000000..3097b8e2c7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface.S b/runtime/interpreter/mterp/arm/op_invoke_interface.S
new file mode 100644
index 0000000000..f6d565b168
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeInterface" }
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
new file mode 100644
index 0000000000..c8443b0cdc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static.S b/runtime/interpreter/mterp/arm/op_invoke_static.S
new file mode 100644
index 0000000000..c3cefcff46
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_static.S
@@ -0,0 +1,2 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeStatic" }
+
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static_range.S b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
new file mode 100644
index 0000000000..dd60d7bbfa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super.S b/runtime/interpreter/mterp/arm/op_invoke_super.S
new file mode 100644
index 0000000000..92ef2a4e3e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeSuper" }
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super_range.S b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
new file mode 100644
index 0000000000..9e4fb1c9a1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual.S b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
new file mode 100644
index 0000000000..5b893ff866
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtual" }
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
new file mode 100644
index 0000000000..020e8b8135
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
new file mode 100644
index 0000000000..2b42a7863a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000000..42f2deda39
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm/op_iput.S b/runtime/interpreter/mterp/arm/op_iput.S
new file mode 100644
index 0000000000..d224cd8a77
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput.S
@@ -0,0 +1,22 @@
+%default { "is_object":"0", "handler":"artSet32InstanceFromMterp" }
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern $handler
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl $handler
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean.S b/runtime/interpreter/mterp/arm/op_iput_boolean.S
new file mode 100644
index 0000000000..c9e8589de5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
new file mode 100644
index 0000000000..f0a2777821
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte.S b/runtime/interpreter/mterp/arm/op_iput_byte.S
new file mode 100644
index 0000000000..c9e8589de5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
new file mode 100644
index 0000000000..f0a2777821
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char.S b/runtime/interpreter/mterp/arm/op_iput_char.S
new file mode 100644
index 0000000000..5046f6b6a3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_char.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char_quick.S b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
new file mode 100644
index 0000000000..5212fc355b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object.S b/runtime/interpreter/mterp/arm/op_iput_object.S
new file mode 100644
index 0000000000..d942e846f2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_object.S
@@ -0,0 +1,11 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpIputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_object_quick.S b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
new file mode 100644
index 0000000000..876b3daad8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpIputObjectQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_quick.S b/runtime/interpreter/mterp/arm/op_iput_quick.S
new file mode 100644
index 0000000000..98c8150cd6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"str" }
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $store r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
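The quickened iput family above works because the verifier has already resolved the field: the instruction carries a raw byte offset, so the handler reduces to a null check plus a store. A minimal C sketch of the idea (names hypothetical, not part of the runtime API):

    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors op_iput_quick: null-check the object, then store through the
     * precomputed byte offset ($store picks str/strb/strh per field width). */
    static int iput32_quick(void* obj, uint32_t byte_offset, uint32_t value) {
        if (obj == NULL)
            return -1;                                   /* common_errNullObject */
        *(uint32_t*)((char*)obj + byte_offset) = value;  /* $store r0, [r3, r1]  */
        return 0;
    }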
diff --git a/runtime/interpreter/mterp/arm/op_iput_short.S b/runtime/interpreter/mterp/arm/op_iput_short.S
new file mode 100644
index 0000000000..5046f6b6a3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_short.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_short_quick.S b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
new file mode 100644
index 0000000000..5212fc355b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
new file mode 100644
index 0000000000..8bbd63efc9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_wide.S
@@ -0,0 +1,16 @@
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
new file mode 100644
index 0000000000..a2fc9e11ed
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
@@ -0,0 +1,13 @@
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r3, 1 @ r3<- field byte offset
+ GET_VREG r2, r2 @ r2<- fp[B], the object pointer
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ cmp r2, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S
new file mode 100644
index 0000000000..1d48a2acf2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_double.S
@@ -0,0 +1,27 @@
+%default {}
+ /*
+ * Specialized 64-bit floating point operation.
+ *
+ * Note: The result will be returned in d2.
+ *
+ * For: long-to-double
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ vldr d0, [r3] @ d0<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
+ vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
+ vldr d3, constval$opcode
+ vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
+
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ vstr.64 d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+ /* literal pool helper */
+constval${opcode}:
+ .8byte 0x41f0000000000000
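The constant 0x41f0000000000000 in the literal pool is the IEEE-754 bit pattern of 2^32, so the vmla computes hi * 2^32 + lo, with the high word converted as a signed int and the low word as an unsigned int. A C sketch of the same recombination (illustrative helper, not the runtime's code):

    #include <stdint.h>

    static double long_to_double(int64_t v) {
        int32_t  hi = (int32_t)(v >> 32);  /* vAAh: signed   (vcvt.f64.s32) */
        uint32_t lo = (uint32_t)v;         /* vAAl: unsigned (vcvt.f64.u32) */
        return (double)hi * 4294967296.0 + (double)lo;  /* 4294967296 = 2^32 */
    }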
diff --git a/runtime/interpreter/mterp/arm/op_long_to_float.S b/runtime/interpreter/mterp/arm/op_long_to_float.S
new file mode 100644
index 0000000000..efa5a66913
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_float.S
@@ -0,0 +1 @@
+%include "arm/unopNarrower.S" {"instr":"bl __aeabi_l2f"}
diff --git a/runtime/interpreter/mterp/arm/op_long_to_int.S b/runtime/interpreter/mterp/arm/op_long_to_int.S
new file mode 100644
index 0000000000..3e91f230b7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "arm/op_move.S"
diff --git a/runtime/interpreter/mterp/arm/op_monitor_enter.S b/runtime/interpreter/mterp/arm/op_monitor_enter.S
new file mode 100644
index 0000000000..3c34f75d9d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_monitor_enter.S
@@ -0,0 +1,14 @@
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artLockObjectFromCode
+ cmp r0, #0
+ bne MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_monitor_exit.S b/runtime/interpreter/mterp/arm/op_monitor_exit.S
new file mode 100644
index 0000000000..fc7cef5395
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_monitor_exit.S
@@ -0,0 +1,18 @@
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ bne MterpException
+ FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move.S b/runtime/interpreter/mterp/arm/op_move.S
new file mode 100644
index 0000000000..dfecc2432d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_16.S b/runtime/interpreter/mterp/arm/op_move_16.S
new file mode 100644
index 0000000000..78138a238c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_exception.S b/runtime/interpreter/mterp/arm/op_move_exception.S
new file mode 100644
index 0000000000..0242e26ee5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_exception.S
@@ -0,0 +1,9 @@
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_from16.S b/runtime/interpreter/mterp/arm/op_move_from16.S
new file mode 100644
index 0000000000..3e79417307
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_object.S b/runtime/interpreter/mterp/arm/op_move_object.S
new file mode 100644
index 0000000000..16de57bac0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object.S
@@ -0,0 +1 @@
+%include "arm/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_16.S b/runtime/interpreter/mterp/arm/op_move_object_16.S
new file mode 100644
index 0000000000..25343006a3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object_16.S
@@ -0,0 +1 @@
+%include "arm/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_from16.S b/runtime/interpreter/mterp/arm/op_move_object_from16.S
new file mode 100644
index 0000000000..9e0cf02d17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "arm/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result.S b/runtime/interpreter/mterp/arm/op_move_result.S
new file mode 100644
index 0000000000..f2586a0769
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_object.S b/runtime/interpreter/mterp/arm/op_move_result_object.S
new file mode 100644
index 0000000000..643296a5dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result_object.S
@@ -0,0 +1 @@
+%include "arm/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
new file mode 100644
index 0000000000..c64103c391
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S
@@ -0,0 +1,9 @@
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
new file mode 100644
index 0000000000..1345b95fa7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide.S
@@ -0,0 +1,11 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
new file mode 100644
index 0000000000..133a4c36ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S
@@ -0,0 +1,11 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 2 @ r3<- BBBB
+ FETCH r2, 1 @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
new file mode 100644
index 0000000000..f2ae785032
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
@@ -0,0 +1,11 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 1 @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_mul_double.S b/runtime/interpreter/mterp/arm/op_mul_double.S
new file mode 100644
index 0000000000..530e85a39e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
new file mode 100644
index 0000000000..da1abc6e04
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float.S b/runtime/interpreter/mterp/arm/op_mul_float.S
new file mode 100644
index 0000000000..6a72e6f42c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
new file mode 100644
index 0000000000..edb5101666
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int.S b/runtime/interpreter/mterp/arm/op_mul_int.S
new file mode 100644
index 0000000000..d6151d4d45
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binop.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
new file mode 100644
index 0000000000..66a797d9fe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binop2addr.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
new file mode 100644
index 0000000000..4e40c438f7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binopLit16.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
new file mode 100644
index 0000000000..dbafae9d24
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binopLit8.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
new file mode 100644
index 0000000000..9e83778e2f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_long.S
@@ -0,0 +1,36 @@
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
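Only three of the four 32x32 partial products can reach the low 64 bits, which is what the mul/umull/mla sequence exploits. A C model of the decomposition (hypothetical helper, for illustration only):

    #include <stdint.h>

    static uint64_t mul64(uint64_t a, uint64_t b) {
        uint32_t x = (uint32_t)a, w = (uint32_t)(a >> 32);   /* a = WX (r1:r0) */
        uint32_t z = (uint32_t)b, y = (uint32_t)(b >> 32);   /* b = YZ (r3:r2) */
        uint64_t zx = (uint64_t)z * x;                       /* umull r9, r10  */
        uint32_t hi = (uint32_t)(zx >> 32) + z * w + y * x;  /* mul, mla, add  */
        return ((uint64_t)hi << 32) | (uint32_t)zx;          /* stmia {r9-r10} */
    }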
diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
new file mode 100644
index 0000000000..789dbd3025
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
@@ -0,0 +1,24 @@
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See op_mul_long for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_neg_double.S b/runtime/interpreter/mterp/arm/op_neg_double.S
new file mode 100644
index 0000000000..33e609cbfa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_double.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"instr":"add r1, r1, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_float.S b/runtime/interpreter/mterp/arm/op_neg_float.S
new file mode 100644
index 0000000000..993583fc86
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_float.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"add r0, r0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_int.S b/runtime/interpreter/mterp/arm/op_neg_int.S
new file mode 100644
index 0000000000..ec0b253745
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_int.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"rsb r0, r0, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_long.S b/runtime/interpreter/mterp/arm/op_neg_long.S
new file mode 100644
index 0000000000..dab2eb492e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_long.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"preinstr":"rsbs r0, r0, #0", "instr":"rsc r1, r1, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_new_array.S b/runtime/interpreter/mterp/arm/op_new_array.S
new file mode 100644
index 0000000000..8bb792c217
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_new_array.S
@@ -0,0 +1,19 @@
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_new_instance.S b/runtime/interpreter/mterp/arm/op_new_instance.S
new file mode 100644
index 0000000000..95d4be8762
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_new_instance.S
@@ -0,0 +1,14 @@
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rSELF
+ mov r2, rINST
+ bl MterpNewInstance @ (shadow_frame, self, inst_data)
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_nop.S b/runtime/interpreter/mterp/arm/op_nop.S
new file mode 100644
index 0000000000..af0f88f7e3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_nop.S
@@ -0,0 +1,3 @@
+ FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ GOTO_OPCODE ip @ execute it
diff --git a/runtime/interpreter/mterp/arm/op_not_int.S b/runtime/interpreter/mterp/arm/op_not_int.S
new file mode 100644
index 0000000000..816485ae6a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_not_int.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"mvn r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_not_long.S b/runtime/interpreter/mterp/arm/op_not_long.S
new file mode 100644
index 0000000000..49a59056d5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_not_long.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"preinstr":"mvn r0, r0", "instr":"mvn r1, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int.S b/runtime/interpreter/mterp/arm/op_or_int.S
new file mode 100644
index 0000000000..b046e8d8d8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_2addr.S b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
new file mode 100644
index 0000000000..493c59f285
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit16.S b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
new file mode 100644
index 0000000000..0a01db8052
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit8.S b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
new file mode 100644
index 0000000000..2d85038d59
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long.S b/runtime/interpreter/mterp/arm/op_or_long.S
new file mode 100644
index 0000000000..048c45ccb5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long_2addr.S b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
new file mode 100644
index 0000000000..93953461ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_packed_switch.S b/runtime/interpreter/mterp/arm/op_packed_switch.S
new file mode 100644
index 0000000000..1e3370ea6a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_packed_switch.S
@@ -0,0 +1,39 @@
+%default { "func":"MterpDoPackedSwitch" }
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl $func @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl $func @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
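Both switch variants leave the table lookup to a C helper and only turn the returned code-unit offset into a branch. A hedged sketch of what such a helper does with a packed-switch payload (assuming the Dalvik packed-switch-payload layout: ident, size, first_key, then size 32-bit targets; 3 code units is the width of the switch instruction itself, i.e. fall through):

    #include <stdint.h>

    static int32_t packed_switch(const uint16_t* payload, int32_t test_val) {
        int32_t size = payload[1];              /* payload[0] = ident (0x0100)  */
        int32_t first_key = (int32_t)(payload[2] | ((uint32_t)payload[3] << 16));
        const uint16_t* targets = payload + 4;  /* int32 targets, 16-bit pairs  */
        uint32_t index = (uint32_t)(test_val - first_key);
        if (index >= (uint32_t)size)
            return 3;                           /* no match: skip the 31t insn  */
        return (int32_t)(targets[2 * index] |
                         ((uint32_t)targets[2 * index + 1] << 16));
    }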
diff --git a/runtime/interpreter/mterp/arm/op_rem_double.S b/runtime/interpreter/mterp/arm/op_rem_double.S
new file mode 100644
index 0000000000..b539221eac
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_double.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a double remainder function, but libm does */
+%include "arm/binopWide.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
new file mode 100644
index 0000000000..372ef1dfe6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a double remainder function, but libm does */
+%include "arm/binopWide2addr.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float.S b/runtime/interpreter/mterp/arm/op_rem_float.S
new file mode 100644
index 0000000000..7bd10deafe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_float.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a float remainder function, but libm does */
+%include "arm/binop.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
new file mode 100644
index 0000000000..93c5faef68
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a float remainder function, but libm does */
+%include "arm/binop2addr.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_int.S b/runtime/interpreter/mterp/arm/op_rem_int.S
new file mode 100644
index 0000000000..ff6257340f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int.S
@@ -0,0 +1,33 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between the sdiv instruction and
+ * the gcc helper depends on the compile-time value of __ARM_ARCH_EXT_IDIV__
+ * (defined for ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
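On __ARM_ARCH_EXT_IDIV__ cores the remainder is synthesized from the quotient: mls r1, r1, r2, r0 computes r0 - r1*r2, i.e. num - den*(num/den). A C sketch of the identity (the divisor is already guarded against zero above):

    static int rem_int(int num, int den) {
        int quot = num / den;     /* sdiv r2, r0, r1     */
        return num - quot * den;  /* mls  r1, r1, r2, r0 */
    }

The same quotient/remainder pair is what __aeabi_idivmod returns in r0/r1 on cores without hardware divide.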
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
new file mode 100644
index 0000000000..ba5751a2ae
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between the sdiv instruction and
+ * the gcc helper depends on the compile-time value of __ARM_ARCH_EXT_IDIV__
+ * (defined for ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
new file mode 100644
index 0000000000..4edb187f50
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
@@ -0,0 +1,31 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between the sdiv instruction and
+ * the gcc helper depends on the compile-time value of __ARM_ARCH_EXT_IDIV__
+ * (defined for ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
new file mode 100644
index 0000000000..3888361bc5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between the sdiv instruction and
+ * the gcc helper depends on the compile-time value of __ARM_ARCH_EXT_IDIV__
+ * (defined for ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @cmp r1, #0 @ is second operand zero? (movs above already set the flags)
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_long.S b/runtime/interpreter/mterp/arm/op_rem_long.S
new file mode 100644
index 0000000000..b2b1c24187
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_long.S
@@ -0,0 +1,2 @@
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
new file mode 100644
index 0000000000..f87d493a91
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
@@ -0,0 +1,2 @@
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_return.S b/runtime/interpreter/mterp/arm/op_return.S
new file mode 100644
index 0000000000..a4ffd04ea6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return.S
@@ -0,0 +1,12 @@
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_object.S b/runtime/interpreter/mterp/arm/op_return_object.S
new file mode 100644
index 0000000000..c4907302c9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_object.S
@@ -0,0 +1 @@
+%include "arm/op_return.S"
diff --git a/runtime/interpreter/mterp/arm/op_return_void.S b/runtime/interpreter/mterp/arm/op_return_void.S
new file mode 100644
index 0000000000..f6dfd99309
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_void.S
@@ -0,0 +1,5 @@
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
new file mode 100644
index 0000000000..7322940a74
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
@@ -0,0 +1,3 @@
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
new file mode 100644
index 0000000000..2881c87e58
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_wide.S
@@ -0,0 +1,10 @@
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int.S b/runtime/interpreter/mterp/arm/op_rsub_int.S
new file mode 100644
index 0000000000..1508dd4373
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "arm/binopLit16.S" {"instr":"rsb r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
new file mode 100644
index 0000000000..2ee11e159e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"rsb r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
new file mode 100644
index 0000000000..2b81f5069f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget.S
@@ -0,0 +1,27 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern $helper
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl $helper
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if $is_object
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
new file mode 100644
index 0000000000..ebfb44cb20
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
new file mode 100644
index 0000000000..d76862e600
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
new file mode 100644
index 0000000000..b7fcfc23d4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_char.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
new file mode 100644
index 0000000000..8e7d075b72
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_object.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
new file mode 100644
index 0000000000..3e80f0da87
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_short.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
new file mode 100644
index 0000000000..97db05f596
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field@BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet64StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r9, rINST, lsr #8 @ r9<- AA
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shl_int.S b/runtime/interpreter/mterp/arm/op_shl_int.S
new file mode 100644
index 0000000000..7e4c768888
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
new file mode 100644
index 0000000000..4286577e19
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
new file mode 100644
index 0000000000..6a48bfcf17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S
new file mode 100644
index 0000000000..dc8a679cf7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
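The shift is composed from 32-bit operations: for a distance n < 32 the high word receives the bits shifted out of the low word, and for n >= 32 the movpl path moves the low word's bits entirely into the high word (ARM register-specified shifts of 32 or more conveniently produce 0). shr-long below mirrors the same structure with arithmetic shifts. A C model under those assumptions:

    #include <stdint.h>

    static uint64_t shl64(uint32_t lo, uint32_t hi, uint32_t n) {
        n &= 63;                            /* Dalvik masks the distance to 6 bits */
        uint32_t new_hi, new_lo;
        if (n < 32) {
            new_hi = (hi << n) | (n != 0 ? lo >> (32 - n) : 0);
            new_lo = lo << n;
        } else {                            /* the movpl case */
            new_hi = lo << (n - 32);
            new_lo = 0;
        }
        return ((uint64_t)new_hi << 32) | new_lo;
    }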
diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
new file mode 100644
index 0000000000..fd7668de62
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_int.S b/runtime/interpreter/mterp/arm/op_shr_int.S
new file mode 100644
index 0000000000..6317605c6d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
new file mode 100644
index 0000000000..cc8632f4bd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
new file mode 100644
index 0000000000..60fe5fca94
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S
new file mode 100644
index 0000000000..c0edf90f76
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
new file mode 100644
index 0000000000..ffeaf9c865
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sparse_switch.S b/runtime/interpreter/mterp/arm/op_sparse_switch.S
new file mode 100644
index 0000000000..9f7a42b96e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "arm/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
new file mode 100644
index 0000000000..7e0c1a64ef
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput.S
@@ -0,0 +1,20 @@
+%default { "helper":"artSet32StaticFromCode"}
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl $helper
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
new file mode 100644
index 0000000000..e3bbf2b8ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
new file mode 100644
index 0000000000..e3bbf2b8ba
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
new file mode 100644
index 0000000000..d8d65cb5ef
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_char.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_object.S b/runtime/interpreter/mterp/arm/op_sput_object.S
new file mode 100644
index 0000000000..6d3a9a7110
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_object.S
@@ -0,0 +1,11 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpSputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
new file mode 100644
index 0000000000..d8d65cb5ef
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_short.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
new file mode 100644
index 0000000000..adbcffa5a9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -0,0 +1,19 @@
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sub_double.S b/runtime/interpreter/mterp/arm/op_sub_double.S
new file mode 100644
index 0000000000..69bcc6736c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
new file mode 100644
index 0000000000..2ea59fe854
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float.S b/runtime/interpreter/mterp/arm/op_sub_float.S
new file mode 100644
index 0000000000..3f17a0dfe3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
new file mode 100644
index 0000000000..2f4aac4ad8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int.S b/runtime/interpreter/mterp/arm/op_sub_int.S
new file mode 100644
index 0000000000..efb9e10035
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
new file mode 100644
index 0000000000..4d3036b82c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long.S b/runtime/interpreter/mterp/arm/op_sub_long.S
new file mode 100644
index 0000000000..6f1eb6ed77
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
new file mode 100644
index 0000000000..8e9da05525
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_throw.S b/runtime/interpreter/mterp/arm/op_throw.S
new file mode 100644
index 0000000000..be49ada424
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_throw.S
@@ -0,0 +1,11 @@
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r1, r2 @ r1<- vAA (exception object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/arm/op_unused_3e.S b/runtime/interpreter/mterp/arm/op_unused_3e.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_3e.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_3f.S b/runtime/interpreter/mterp/arm/op_unused_3f.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_3f.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_40.S b/runtime/interpreter/mterp/arm/op_unused_40.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_40.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_41.S b/runtime/interpreter/mterp/arm/op_unused_41.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_41.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_42.S b/runtime/interpreter/mterp/arm/op_unused_42.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_42.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_43.S b/runtime/interpreter/mterp/arm/op_unused_43.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_43.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_73.S b/runtime/interpreter/mterp/arm/op_unused_73.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_73.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_79.S b/runtime/interpreter/mterp/arm/op_unused_79.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_79.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_7a.S b/runtime/interpreter/mterp/arm/op_unused_7a.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_7a.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f3.S b/runtime/interpreter/mterp/arm/op_unused_f3.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f3.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f4.S b/runtime/interpreter/mterp/arm/op_unused_f4.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f4.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f5.S b/runtime/interpreter/mterp/arm/op_unused_f5.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f5.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f6.S b/runtime/interpreter/mterp/arm/op_unused_f6.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f6.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f7.S b/runtime/interpreter/mterp/arm/op_unused_f7.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f7.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f8.S b/runtime/interpreter/mterp/arm/op_unused_f8.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f8.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f9.S b/runtime/interpreter/mterp/arm/op_unused_f9.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f9.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fa.S b/runtime/interpreter/mterp/arm/op_unused_fa.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fa.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fb.S b/runtime/interpreter/mterp/arm/op_unused_fb.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fb.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fc.S b/runtime/interpreter/mterp/arm/op_unused_fc.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fc.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fd.S b/runtime/interpreter/mterp/arm/op_unused_fd.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fd.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fe.S b/runtime/interpreter/mterp/arm/op_unused_fe.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fe.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_ff.S b/runtime/interpreter/mterp/arm/op_unused_ff.S
new file mode 100644
index 0000000000..10948dc06c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_ff.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int.S b/runtime/interpreter/mterp/arm/op_ushr_int.S
new file mode 100644
index 0000000000..a74361b64c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
new file mode 100644
index 0000000000..f2d1d13881
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
new file mode 100644
index 0000000000..40a443549a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S
new file mode 100644
index 0000000000..f64c861ce5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance to its low 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
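The two-register shift composition above can be modeled in C as below; the
function name is illustrative and the distance is assumed pre-masked to
0..63, exactly as "and r2, r2, #63" guarantees:

    #include <stdint.h>

    static uint64_t ushr64_model(uint32_t lo, uint32_t hi, unsigned n) {
        uint32_t out_lo, out_hi;
        if (n < 32) {
            /* "mov r0, r0, lsr r2" then "orr r0, r0, r1, asl r3";
             * the n == 0 guard avoids C's undefined "<< 32" (the ARM
             * register-specified shift handles that case natively). */
            out_lo = n ? ((lo >> n) | (hi << (32 - n))) : lo;
            out_hi = hi >> n;                /* "mov r1, r1, lsr r2" */
        } else {
            out_lo = hi >> (n - 32);         /* "movpl r0, r1, lsr ip" */
            out_hi = 0;     /* register shift by >= 32 yields 0 on ARM */
        }
        return (uint64_t)out_lo | ((uint64_t)out_hi << 32);
    }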
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
new file mode 100644
index 0000000000..dbab08d817
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vA/vA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_xor_int.S b/runtime/interpreter/mterp/arm/op_xor_int.S
new file mode 100644
index 0000000000..fd7a4b7a02
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
new file mode 100644
index 0000000000..196a665724
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
new file mode 100644
index 0000000000..39f2a47bfa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
new file mode 100644
index 0000000000..46bb712167
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long.S b/runtime/interpreter/mterp/arm/op_xor_long.S
new file mode 100644
index 0000000000..4f830d01be
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
new file mode 100644
index 0000000000..5b5ed88059
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/unop.S b/runtime/interpreter/mterp/arm/unop.S
new file mode 100644
index 0000000000..56518b5b2b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unop.S
@@ -0,0 +1,20 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
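Since this template (and its $preinstr/$instr substitution) recurs in the
unopNarrower/unopWide/unopWider variants that follow, here is a hedged C
model of the semantics it encodes; names are illustrative:

    #include <stdint.h>

    /* inst is the 16-bit code unit: B in bits 15..12, A in bits 11..8. */
    static void unop32_model(uint32_t *fp, uint16_t inst,
                             uint32_t (*op)(uint32_t)) {
        unsigned b = (inst >> 12) & 0xf;  /* "mov r3, rINST, lsr #12" */
        unsigned a = (inst >> 8) & 0xf;   /* "ubfx r9, rINST, #8, #4" */
        fp[a] = op(fp[b]);                /* $instr: result = op r0 */
    }

The wide variants differ only in reading/writing a two-slot register pair
via ldmia/stmia instead of a single vreg.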
diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S
new file mode 100644
index 0000000000..a5fc02797d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopNarrower.S
@@ -0,0 +1,23 @@
+%default {"preinstr":""}
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for op_move.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
new file mode 100644
index 0000000000..7b8739cb92
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopWide.S
@@ -0,0 +1,21 @@
+%default {"preinstr":""}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
new file mode 100644
index 0000000000..657a3956e5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopWider.S
@@ -0,0 +1,20 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unused.S b/runtime/interpreter/mterp/arm/unused.S
new file mode 100644
index 0000000000..ffa00becfd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/arm/zcmp.S b/runtime/interpreter/mterp/arm/zcmp.S
new file mode 100644
index 0000000000..6e9ef55104
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/zcmp.S
@@ -0,0 +1,32 @@
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ mov${revcmp} r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ mov${revcmp} r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
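The reverse-comparison trick preloads r1 with the taken branch distance and
conditionally overwrites it with the not-taken distance (2 code units), so
the comparison itself needs no branch. A small C model for if-eqz (names
illustrative; the handler additionally doubles the distance to bytes with
"adds", which also sets the flags the suspend check uses):

    #include <stdint.h>

    static const uint16_t *zcmp_eqz_model(const uint16_t *pc,
                                          int32_t vAA) {
        int32_t dist = (int16_t)pc[1];  /* FETCH_S r1, 1 */
        if (vAA != 0)                   /* reversed "ne" = not taken */
            dist = 2;                   /* skip opcode + offset words */
        return pc + dist;               /* pointer math in code units */
    }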
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
new file mode 100644
index 0000000000..436dcd28f6
--- /dev/null
+++ b/runtime/interpreter/mterp/config_arm
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv7-A targets.
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub arm/alt_stub.S
+
+# file header and basic definitions
+import arm/header.S
+
+# arch-specific entry point to interpreter
+import arm/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub arm/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start arm
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import arm/footer.S
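For readers unfamiliar with the "handler-style computed-goto" and
"handler-size 128" directives above: each opcode handler is padded to a
fixed 128-byte slot, so dispatch is a single computed jump to
rIBASE + opcode * 128 with no table load. The sketch below shows the same
dispatch idea in GNU C (labels-as-values); it is illustrative only and uses
a pointer table where the generated code uses fixed-size slots:

    #include <stdint.h>
    #include <stdio.h>

    static int run(const uint8_t *code) {
        static const void *table[] = { &&op_nop, &&op_halt };
        int executed = 0;
    #define DISPATCH() goto *table[*code++]
        DISPATCH();
    op_nop:
        executed++;
        DISPATCH();
    op_halt:
        return executed;
    #undef DISPATCH
    }

    int main(void) {
        const uint8_t prog[] = { 0, 0, 0, 1 };  /* nop x3, halt */
        printf("%d\n", run(prog));              /* prints 3 */
        return 0;
    }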
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
new file mode 100644
index 0000000000..ef3c721df2
--- /dev/null
+++ b/runtime/interpreter/mterp/config_arm64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARM64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub arm64/alt_stub.S
+
+# file header and basic definitions
+import arm64/header.S
+
+# arch-specific entry point to interpreter
+import arm64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub arm64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start arm64
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import arm64/footer.S
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
new file mode 100644
index 0000000000..d1221f731b
--- /dev/null
+++ b/runtime/interpreter/mterp/config_mips
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS_32
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub mips/alt_stub.S
+
+# file header and basic definitions
+import mips/header.S
+
+# arch-specific entry point to interpreter
+import mips/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub mips/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start mips
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import mips/footer.S
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
new file mode 100644
index 0000000000..f804ce5566
--- /dev/null
+++ b/runtime/interpreter/mterp/config_mips64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS_64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub mips64/alt_stub.S
+
+# file header and basic definitions
+import mips64/header.S
+
+# arch-specific entry point to interpreter
+import mips64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub mips64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start mips64
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import mips64/footer.S
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
new file mode 100644
index 0000000000..277817d92e
--- /dev/null
+++ b/runtime/interpreter/mterp/config_x86
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for X86
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub x86/alt_stub.S
+
+# file header and basic definitions
+import x86/header.S
+
+# arch-specific entry point to interpreter
+import x86/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub x86/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start x86
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import x86/footer.S
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
new file mode 100644
index 0000000000..a002dc2873
--- /dev/null
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for X86_64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub x86_64/alt_stub.S
+
+# file header and basic definitions
+import x86_64/header.S
+
+# arch-specific entry point to interpreter
+import x86_64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub x86_64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start x86_64
+ # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
+ # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import x86_64/footer.S
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
new file mode 100755
index 0000000000..f56d8bddaf
--- /dev/null
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -0,0 +1,602 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Using instructions from an architecture-specific config file, generate the
+# assembly source file for the mterp interpreter.
+#
+
+import sys, re
+from string import Template
+
+interp_defs_file = "../../dex_instruction_list.h" # need opcode list
+kNumPackedOpcodes = 256
+
+splitops = False
+verbose = False
+handler_size_bits = -1000
+handler_size_bytes = -1000
+in_op_start = 0 # 0=not started, 1=started, 2=ended
+in_alt_op_start = 0 # 0=not started, 1=started, 2=ended
+default_op_dir = None
+default_alt_stub = None
+opcode_locations = {}
+alt_opcode_locations = {}
+asm_stub_text = []
+fallback_stub_text = []
+label_prefix = ".L" # use ".L" to hide labels from gdb
+alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
+style = None # interpreter style
+generate_alt_table = False
+
+# Exception class.
+class DataParseError(SyntaxError):
+ "Failure when parsing data file"
+
+#
+# Set any omnipresent substitution values.
+#
+def getGlobalSubDict():
+ return { "handler_size_bits":handler_size_bits,
+ "handler_size_bytes":handler_size_bytes }
+
+#
+# Parse arch config file --
+# Set interpreter style.
+#
+def setHandlerStyle(tokens):
+ global style
+ if len(tokens) != 2:
+ raise DataParseError("handler-style requires one argument")
+ style = tokens[1]
+ if style != "computed-goto":
+ raise DataParseError("handler-style (%s) invalid" % style)
+
+#
+# Parse arch config file --
+# Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
+# log2(handler_size_bytes). Throws an exception if "bytes" is zero or
+# not a power of two.
+#
+def setHandlerSize(tokens):
+ global handler_size_bits, handler_size_bytes
+ if style != "computed-goto":
+ print "Warning: handler-size valid only for computed-goto interpreters"
+ if len(tokens) != 2:
+ raise DataParseError("handler-size requires one argument")
+ if handler_size_bits != -1000:
+ raise DataParseError("handler-size may only be set once")
+
+ # compute log2(n), and make sure n is 0 or a power of 2
+ handler_size_bytes = bytes = int(tokens[1])
+ bits = -1
+ while bytes > 0:
+ bytes //= 2 # halve with truncating division
+ bits += 1
+
+ if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
+ raise DataParseError("handler-size (%d) must be power of 2" \
+ % orig_bytes)
+ handler_size_bits = bits
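[Editor's note: the loop above is an integer log2 — halve until zero while counting bits, then verify the input was an exact power of two. A standalone sketch of the same arithmetic, with invented names for illustration:]

    def log2_exact(n):
        # Halve with truncating division, counting bits, as setHandlerSize does.
        bits, m = -1, n
        while m > 0:
            m //= 2
            bits += 1
        if n == 0 or n != (1 << bits):
            raise ValueError("%d is not a power of 2" % n)
        return bits

    print(log2_exact(128))  # -> 7, matching "handler-size 128" in the configs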
+
+#
+# Parse arch config file --
+# Copy a file into the asm output file.
+#
+def importFile(tokens):
+ if len(tokens) != 2:
+ raise DataParseError("import requires one argument")
+ source = tokens[1]
+ if source.endswith(".S"):
+ appendSourceFile(source, getGlobalSubDict(), asm_fp, None)
+ else:
+ raise DataParseError("don't know how to import %s (expecting .cpp/.S)"
+ % source)
+
+#
+# Parse arch config file --
+# Load the asm stub template lines.
+#
+def setAsmStub(tokens):
+ global asm_stub_text
+ if len(tokens) != 2:
+ raise DataParseError("asm-stub requires one argument")
+ try:
+ stub_fp = open(tokens[1])
+ asm_stub_text = stub_fp.readlines()
+ except IOError, err:
+ stub_fp.close()
+ raise DataParseError("unable to load asm-stub: %s" % str(err))
+ stub_fp.close()
+
+#
+# Parse arch config file --
+# Load the fallback stub template lines.
+#
+def setFallbackStub(tokens):
+ global fallback_stub_text
+ if len(tokens) != 2:
+ raise DataParseError("fallback-stub requires one argument")
+ try:
+ stub_fp = open(tokens[1])
+ fallback_stub_text = stub_fp.readlines()
+ except IOError, err:
+ stub_fp.close()
+ raise DataParseError("unable to load fallback-stub: %s" % str(err))
+ stub_fp.close()
+
+#
+# Parse arch config file --
+# Record location of default alt stub
+#
+def setAsmAltStub(tokens):
+ global default_alt_stub, generate_alt_table
+ if len(tokens) != 2:
+ raise DataParseError("import requires one argument")
+ default_alt_stub = tokens[1]
+ generate_alt_table = True
+
+#
+# Parse arch config file --
+# Start of opcode list.
+#
+def opStart(tokens):
+ global in_op_start
+ global default_op_dir
+ if len(tokens) != 2:
+ raise DataParseError("opStart takes a directory name argument")
+ if in_op_start != 0:
+ raise DataParseError("opStart can only be specified once")
+ default_op_dir = tokens[1]
+ in_op_start = 1
+
+#
+# Parse arch config file --
+# Set location of a single alt opcode's source file.
+#
+def altEntry(tokens):
+ global generate_alt_table
+ if len(tokens) != 3:
+ raise DataParseError("alt requires exactly two arguments")
+ if in_op_start != 1:
+ raise DataParseError("alt statements must be between opStart/opEnd")
+ try:
+ index = opcodes.index(tokens[1])
+ except ValueError:
+ raise DataParseError("unknown opcode %s" % tokens[1])
+ if alt_opcode_locations.has_key(tokens[1]):
+ print "Note: alt overrides earlier %s (%s -> %s)" \
+ % (tokens[1], alt_opcode_locations[tokens[1]], tokens[2])
+ alt_opcode_locations[tokens[1]] = tokens[2]
+ generate_alt_table = True
+
+#
+# Parse arch config file --
+# Set location of a single opcode's source file.
+#
+def opEntry(tokens):
+ #global opcode_locations
+ if len(tokens) != 3:
+ raise DataParseError("op requires exactly two arguments")
+ if in_op_start != 1:
+ raise DataParseError("op statements must be between opStart/opEnd")
+ try:
+ index = opcodes.index(tokens[1])
+ except ValueError:
+ raise DataParseError("unknown opcode %s" % tokens[1])
+ if opcode_locations.has_key(tokens[1]):
+ print "Note: op overrides earlier %s (%s -> %s)" \
+ % (tokens[1], opcode_locations[tokens[1]], tokens[2])
+ opcode_locations[tokens[1]] = tokens[2]
+
+#
+# Parse arch config file --
+# End of opcode list; emit instruction blocks.
+#
+def opEnd(tokens):
+ global in_op_start
+ if len(tokens) != 1:
+ raise DataParseError("opEnd takes no arguments")
+ if in_op_start != 1:
+ raise DataParseError("opEnd must follow opStart, and only appear once")
+ in_op_start = 2
+
+ loadAndEmitOpcodes()
+ if splitops == False:
+ if generate_alt_table:
+ loadAndEmitAltOpcodes()
+
+def genaltop(tokens):
+ if in_op_start != 2:
+ raise DataParseError("alt-op can be specified only after op-end")
+ if len(tokens) != 1:
+ raise DataParseError("opEnd takes no arguments")
+ if generate_alt_table:
+ loadAndEmitAltOpcodes()
+
+#
+# Extract an ordered list of instructions from the VM sources. We use the
+# "goto table" definition macro, which has exactly kNumPackedOpcodes
+# entries.
+#
+def getOpcodeList():
+ opcodes = []
+ opcode_fp = open(interp_defs_file)
+ opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
+ for line in opcode_fp:
+ match = opcode_re.match(line)
+ if not match:
+ continue
+ opcodes.append("op_" + match.group(2).lower())
+ opcode_fp.close()
+
+ if len(opcodes) != kNumPackedOpcodes:
+ print "ERROR: found %d opcodes in Interp.h (expected %d)" \
+ % (len(opcodes), kNumPackedOpcodes)
+ raise SyntaxError, "bad opcode count"
+ return opcodes
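[Editor's note: getOpcodeList() depends on each opcode appearing as a V(...) macro row in dex_instruction_list.h. A minimal sketch of the same regex applied to an invented macro line — the real header's exact field layout is an assumption here, not a quotation:]

    import re

    # Hypothetical row: 4 characters of opcode value, then the mnemonic.
    sample = '  V(0x00, NOP, "nop", k10x, false, kIndexNone, kContinue, kVerifyNone)'
    opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
    match = opcode_re.match(sample)
    if match:
        # group(2) is the mnemonic; lower-cased and prefixed as in the script.
        print("op_" + match.group(2).lower())  # -> op_nop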
+
+def emitAlign():
+ if style == "computed-goto":
+ asm_fp.write(" .balign %d\n" % handler_size_bytes)
+
+#
+# Load and emit opcodes for all kNumPackedOpcodes instructions.
+#
+def loadAndEmitOpcodes():
+ sister_list = []
+ assert len(opcodes) == kNumPackedOpcodes
+ need_dummy_start = False
+ start_label = "artMterpAsmInstructionStart"
+ end_label = "artMterpAsmInstructionEnd"
+
+ # point MterpAsmInstructionStart at the first handler or stub
+ asm_fp.write("\n .global %s\n" % start_label)
+ asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
+ asm_fp.write(" .text\n\n")
+
+ for i in xrange(kNumPackedOpcodes):
+ op = opcodes[i]
+
+ if opcode_locations.has_key(op):
+ location = opcode_locations[op]
+ else:
+ location = default_op_dir
+
+ if location == "FALLBACK":
+ emitFallback(i)
+ else:
+ loadAndEmitAsm(location, i, sister_list)
+
+ # For a 100% C implementation, there are no asm handlers or stubs. We
+ # need to have the MterpAsmInstructionStart label point at op_nop, and it's
+ # too annoying to try to slide it in after the alignment psuedo-op, so
+ # we take the low road and just emit a dummy op_nop here.
+ if need_dummy_start:
+ emitAlign()
+ asm_fp.write(label_prefix + "_op_nop: /* dummy */\n")
+
+ emitAlign()
+ asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" .global %s\n" % end_label)
+ asm_fp.write("%s:\n" % end_label)
+
+ if style == "computed-goto":
+ emitSectionComment("Sister implementations", asm_fp)
+ asm_fp.write(" .global artMterpAsmSisterStart\n")
+ asm_fp.write(" .type artMterpAsmSisterStart, %function\n")
+ asm_fp.write(" .text\n")
+ asm_fp.write(" .balign 4\n")
+ asm_fp.write("artMterpAsmSisterStart:\n")
+ asm_fp.writelines(sister_list)
+ asm_fp.write("\n .size artMterpAsmSisterStart, .-artMterpAsmSisterStart\n")
+ asm_fp.write(" .global artMterpAsmSisterEnd\n")
+ asm_fp.write("artMterpAsmSisterEnd:\n\n")
+
+#
+# Load an alternate entry stub
+#
+def loadAndEmitAltStub(source, opindex):
+ op = opcodes[opindex]
+ if verbose:
+ print " alt emit %s --> stub" % source
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+
+ emitAsmHeader(asm_fp, dict, alt_label_prefix)
+ appendSourceFile(source, dict, asm_fp, None)
+
+#
+# Load and emit alternate opcodes for all kNumPackedOpcodes instructions.
+#
+def loadAndEmitAltOpcodes():
+ assert len(opcodes) == kNumPackedOpcodes
+ start_label = "artMterpAsmAltInstructionStart"
+ end_label = "artMterpAsmAltInstructionEnd"
+
+ # point MterpAsmInstructionStart at the first handler or stub
+ asm_fp.write("\n .global %s\n" % start_label)
+ asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write(" .text\n\n")
+ asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
+
+ for i in xrange(kNumPackedOpcodes):
+ op = opcodes[i]
+ if alt_opcode_locations.has_key(op):
+ source = "%s/alt_%s.S" % (alt_opcode_locations[op], op)
+ else:
+ source = default_alt_stub
+ loadAndEmitAltStub(source, i)
+
+ emitAlign()
+ asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" .global %s\n" % end_label)
+ asm_fp.write("%s:\n" % end_label)
+
+#
+# Load an assembly fragment and emit it.
+#
+def loadAndEmitAsm(location, opindex, sister_list):
+ op = opcodes[opindex]
+ source = "%s/%s.S" % (location, op)
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+ if verbose:
+ print " emit %s --> asm" % source
+
+ emitAsmHeader(asm_fp, dict, label_prefix)
+ appendSourceFile(source, dict, asm_fp, sister_list)
+
+#
+# Emit fallback fragment
+#
+def emitFallback(opindex):
+ op = opcodes[opindex]
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+ emitAsmHeader(asm_fp, dict, label_prefix)
+ for line in fallback_stub_text:
+ asm_fp.write(line)
+ asm_fp.write("\n")
+
+#
+# Output the alignment directive and label for an assembly piece.
+#
+def emitAsmHeader(outfp, dict, prefix):
+ outfp.write("/* ------------------------------ */\n")
+ # The alignment directive ensures that the handler occupies
+ # at least the correct amount of space. We don't try to deal
+ # with overflow here.
+ emitAlign()
+ # Emit a label so that gdb will say the right thing. We prepend an
+ # underscore so the symbol name doesn't clash with the Opcode enum.
+ outfp.write(prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
+
+#
+# Output a generic instruction stub that updates the "glue" struct and
+# calls the C implementation.
+#
+def emitAsmStub(outfp, dict):
+ emitAsmHeader(outfp, dict, label_prefix)
+ for line in asm_stub_text:
+ templ = Template(line)
+ outfp.write(templ.substitute(dict))
+
+#
+# Append the file specified by "source" to the open "outfp". Each line will
+# be template-replaced using the substitution dictionary "dict".
+#
+# Lines beginning with "%" are taken as directives.
+# A "%include" line contains a filename and, optionally, a Python-style
+# dictionary declaration with substitution strings. (This is implemented
+# with recursion.)
+#
+# If "sister_list" is provided, and we find a line that contains only "&",
+# all subsequent lines from the file will be appended to sister_list instead
+# of copied to the output.
+#
+# This may modify "dict".
+#
+def appendSourceFile(source, dict, outfp, sister_list):
+ outfp.write("/* File: %s */\n" % source)
+ infp = open(source, "r")
+ in_sister = False
+ for line in infp:
+ if line.startswith("%include"):
+ # Parse the "include" line
+ tokens = line.strip().split(' ', 2)
+ if len(tokens) < 2:
+ raise DataParseError("malformed %%include in %s" % source)
+
+ alt_source = tokens[1].strip("\"")
+ if alt_source == source:
+ raise DataParseError("self-referential %%include in %s"
+ % source)
+
+ new_dict = dict.copy()
+ if len(tokens) == 3:
+ new_dict.update(eval(tokens[2]))
+ #print " including src=%s dict=%s" % (alt_source, new_dict)
+ appendSourceFile(alt_source, new_dict, outfp, sister_list)
+ continue
+
+ elif line.startswith("%default"):
+ # copy keywords into dictionary
+ tokens = line.strip().split(' ', 1)
+ if len(tokens) < 2:
+ raise DataParseError("malformed %%default in %s" % source)
+ defaultValues = eval(tokens[1])
+ for entry in defaultValues:
+ dict.setdefault(entry, defaultValues[entry])
+ continue
+
+ elif line.startswith("%break") and sister_list != None:
+ # allow more than one %break, ignoring all following the first
+ if style == "computed-goto" and not in_sister:
+ in_sister = True
+ sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
+ continue
+
+ # perform keyword substitution if a dictionary was provided
+ if dict is not None:
+ templ = Template(line)
+ try:
+ subline = templ.substitute(dict)
+ except KeyError, err:
+ raise DataParseError("keyword substitution failed in %s: %s"
+ % (source, str(err)))
+ except:
+ print "ERROR: substitution failed: " + line
+ raise
+ else:
+ subline = line
+
+ # write output to appropriate file
+ if in_sister:
+ sister_list.append(subline)
+ else:
+ outfp.write(subline)
+ outfp.write("\n")
+ infp.close()
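[Editor's note: the keyword substitution in appendSourceFile() is plain string.Template replacement over a per-opcode dictionary. A sketch with an invented fragment line — the real fragment files are not quoted here:]

    from string import Template

    # Invented handler-fragment line in the ${keyword} style the generator consumes.
    line = "${opcode}:  /* 0x${opnum} */  .balign ${handler_size_bytes}\n"
    subst = {"opcode": "op_nop", "opnum": 0, "handler_size_bytes": 128}
    print(Template(line).substitute(subst))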
+
+#
+# Emit a C-style section header comment.
+#
+def emitSectionComment(title, fp):
+ equals = "========================================" \
+ "==================================="
+
+ fp.write("\n/*\n * %s\n * %s\n * %s\n */\n" %
+ (equals, title, equals))
+
+
+#
+# ===========================================================================
+# "main" code
+#
+
+#
+# Check args.
+#
+if len(sys.argv) != 3:
+ print "Usage: %s target-arch output-dir" % sys.argv[0]
+ sys.exit(2)
+
+target_arch = sys.argv[1]
+output_dir = sys.argv[2]
+
+#
+# Extract opcode list.
+#
+opcodes = getOpcodeList()
+#for op in opcodes:
+# print " %s" % op
+
+#
+# Open config file.
+#
+try:
+ config_fp = open("config_%s" % target_arch)
+except:
+ print "Unable to open config file 'config_%s'" % target_arch
+ sys.exit(1)
+
+#
+# Open and prepare output files.
+#
+try:
+ asm_fp = open("%s/mterp_%s.S" % (output_dir, target_arch), "w")
+except:
+ print "Unable to open output files"
+ print "Make sure directory '%s' exists and existing files are writable" \
+ % output_dir
+ # Ideally we'd remove the files to avoid confusing "make", but if they
+ # failed to open we probably won't be able to remove them either.
+ sys.exit(1)
+
+print "Generating %s" % (asm_fp.name)
+
+file_header = """/*
+ * This file was generated automatically by gen-mterp.py for '%s'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+""" % (target_arch)
+
+asm_fp.write(file_header)
+
+#
+# Process the config file.
+#
+failed = False
+try:
+ for line in config_fp:
+ line = line.strip() # remove CRLF, leading spaces
+ tokens = line.split(' ') # tokenize
+ #print "%d: %s" % (len(tokens), tokens)
+ if len(tokens[0]) == 0:
+ #print " blank"
+ pass
+ elif tokens[0][0] == '#':
+ #print " comment"
+ pass
+ else:
+ if tokens[0] == "handler-size":
+ setHandlerSize(tokens)
+ elif tokens[0] == "import":
+ importFile(tokens)
+ elif tokens[0] == "asm-stub":
+ setAsmStub(tokens)
+ elif tokens[0] == "asm-alt-stub":
+ setAsmAltStub(tokens)
+ elif tokens[0] == "op-start":
+ opStart(tokens)
+ elif tokens[0] == "op-end":
+ opEnd(tokens)
+ elif tokens[0] == "alt":
+ altEntry(tokens)
+ elif tokens[0] == "op":
+ opEntry(tokens)
+ elif tokens[0] == "handler-style":
+ setHandlerStyle(tokens)
+ elif tokens[0] == "alt-ops":
+ genaltop(tokens)
+ elif tokens[0] == "split-ops":
+ splitops = True
+ elif tokens[0] == "fallback-stub":
+ setFallbackStub(tokens)
+ else:
+ raise DataParseError, "unrecognized command '%s'" % tokens[0]
+ if style is None:
+ print "tokens[0] = %s" % tokens[0]
+ raise DataParseError, "handler-style must be first command"
+except DataParseError, err:
+ print "Failed: " + str(err)
+ # TODO: remove output files so "make" doesn't get confused
+ failed = True
+ asm_fp.close()
+ asm_fp = None
+
+config_fp.close()
+
+#
+# Done!
+#
+if asm_fp:
+ asm_fp.close()
+
+sys.exit(failed)
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
new file mode 100644
index 0000000000..9975458b85
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Mterp entry point and support functions.
+ */
+#include "interpreter/interpreter_common.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "mterp.h"
+
+namespace art {
+namespace interpreter {
+/*
+ * Verify some constants used by the mterp interpreter.
+ */
+void CheckMterpAsmConstants() {
+ /*
+ * If we're using computed goto instruction transitions, make sure
+ * none of the handlers overflows the 128-byte limit. This won't tell
+ * which one did, but if any one is too big the total size will
+ * overflow.
+ */
+ const int width = 128;
+ int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
+ (uintptr_t) artMterpAsmInstructionStart;
+ if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
+ LOG(art::FATAL) << "ERROR: unexpected asm interp size " << interp_size
+ << "(did an instruction handler exceed " << width << " bytes?)";
+ }
+}
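[Editor's note: the fixed 128-byte slots let the generated assembly dispatch with a shift and an add instead of a table load, which is why the total must be exactly kNumPackedOpcodes * 128. A sketch of that address arithmetic — illustrative only, not the real handler layout:]

    HANDLER_SIZE_BYTES = 128  # must match "handler-size" in the configs
    HANDLER_SIZE_BITS = 7     # log2(128), as computed by the generator

    def handler_address(ibase, opcode):
        # Each handler occupies one fixed-size, aligned slot after ibase.
        return ibase + (opcode << HANDLER_SIZE_BITS)

    # 256 slots of 128 bytes: the size CheckMterpAsmConstants() verifies.
    assert handler_address(0, 256) == 256 * HANDLER_SIZE_BYTES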
+
+void InitMterpTls(Thread* self) {
+ self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
+ self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
+ self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
+}
+
+/*
+ * Find the matching case. Returns the offset to the handler instructions.
+ *
+ * Returns 3 if we don't find a match (it's the size of the sparse-switch
+ * instruction).
+ */
+extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
+ const int kInstrLen = 3;
+ uint16_t size;
+ const int32_t* keys;
+ const int32_t* entries;
+
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
+
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));
+
+ size = *switchData++;
+
+ /* The keys are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ keys = reinterpret_cast<const int32_t*>(switchData);
+
+ /* The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ entries = keys + size;
+
+ /*
+ * Binary-search through the array of keys, which are guaranteed to
+ * be sorted low-to-high.
+ */
+ int lo = 0;
+ int hi = size - 1;
+ while (lo <= hi) {
+ int mid = (lo + hi) >> 1;
+
+ int32_t foundVal = keys[mid];
+ if (testVal < foundVal) {
+ hi = mid - 1;
+ } else if (testVal > foundVal) {
+ lo = mid + 1;
+ } else {
+ return entries[mid];
+ }
+ }
+ return kInstrLen;
+}
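[Editor's note: a Python model of the lookup above, following the documented payload layout but with invented key/target data:]

    def sparse_switch_target(keys, targets, test_val, instr_len=3):
        # keys are sorted low-to-high, so binary-search as MterpDoSparseSwitch does.
        lo, hi = 0, len(keys) - 1
        while lo <= hi:
            mid = (lo + hi) >> 1
            if test_val < keys[mid]:
                hi = mid - 1
            elif test_val > keys[mid]:
                lo = mid + 1
            else:
                return targets[mid]
        return instr_len  # no match: step over the 3-code-unit instruction

    print(sparse_switch_target([-4, 10, 300], [0x20, 0x40, 0x60], 10))  # -> 0x40 (64)
    print(sparse_switch_target([-4, 10, 300], [0x20, 0x40, 0x60], 11))  # -> 3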
+
+extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
+ const int kInstrLen = 3;
+
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));
+
+ uint16_t size = *switchData++;
+
+ int32_t firstKey = *switchData++;
+ firstKey |= (*switchData++) << 16;
+
+ int index = testVal - firstKey;
+ if (index < 0 || index >= size) {
+ return kInstrLen;
+ }
+
+ /*
+ * The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
+ return entries[index];
+}
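[Editor's note: the packed form needs no search — subtract first_key and bounds-check the index. The matching sketch, again with invented data:]

    def packed_switch_target(first_key, targets, test_val, instr_len=3):
        index = test_val - first_key
        if index < 0 or index >= len(targets):
            return instr_len  # out of range: fall through past the instruction
        return targets[index]

    print(packed_switch_target(100, [0x10, 0x20, 0x30], 101))  # -> 0x20 (32)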
+
+
+extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kVirtual, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kSuper, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kInterface, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kDirect, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kStatic, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kVirtual, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kSuper, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kInterface, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kDirect, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kStatic, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeVirtualQuick<false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualQuickRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeVirtualQuick<true>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" void MterpThreadFenceForConstructor() {
+ QuasiAtomic::ThreadFenceForConstructor();
+}
+
+extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* s = ResolveString(self, *shadow_frame, index);
+ if (UNLIKELY(s == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, s);
+ return false;
+}
+
+extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, c);
+ return false;
+}
+
+extern "C" bool MterpCheckCast(uint32_t index, Object* obj, art::ArtMethod* method,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return true;
+ }
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ return true;
+ }
+ return false;
+}
+
+extern "C" bool MterpInstanceOf(uint32_t index, Object* obj, art::ArtMethod* method,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return false; // Caller will check for pending exception. Return value unimportant.
+ }
+ return (obj != nullptr) && obj->InstanceOf(c);
+}
+
+extern "C" bool MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return FillArrayData(obj, payload);
+}
+
+extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ Object* obj = nullptr;
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
+ self, false, false);
+ if (LIKELY(c != nullptr)) {
+ if (UNLIKELY(c->IsStringClass())) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ obj = String::Alloc<true>(self, 0, allocator_type, visitor);
+ } else {
+ obj = AllocObjectFromCode<false, true>(
+ inst->VRegB_21c(), shadow_frame->GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ }
+ }
+ if (UNLIKELY(obj == nullptr)) {
+ return false;
+ }
+ obj->GetClass()->AssertInitializedOrInitializingInThread(self);
+ shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ return true;
+}
+
+extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
+ (self, *shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpIputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
+ (self, *shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == nullptr)) {
+ return false;
+ }
+ int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
+ array->SetWithoutChecks<false>(index, val);
+ return true;
+ }
+ return false;
+}
+
+extern "C" bool MterpFilledNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
+ shadow_frame->GetResultRegister());
+}
+
+extern "C" bool MterpFilledNewArrayRange(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
+ shadow_frame->GetResultRegister());
+}
+
+extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode<false, true>(
+ inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ if (UNLIKELY(obj == nullptr)) {
+ return false;
+ }
+ shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ return true;
+}
+
+extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self->IsExceptionPending());
+ const instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame,
+ shadow_frame->GetDexPC(),
+ instrumentation);
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ return false;
+ }
+ // OK - we can deal with it. Update and continue.
+ shadow_frame->SetDexPC(found_dex_pc);
+ return true;
+}
+
+extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
+ self->AssertPendingException();
+ } else {
+ self->AssertNoPendingException();
+ }
+}
+
+extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
+ << self->IsExceptionPending();
+}
+
+extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ if (flags & kCheckpointRequest) {
+ LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
+ } else if (flags & kSuspendRequest) {
+ LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
+ }
+}
+
+extern "C" void MterpSuspendCheck(Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ self->AllowThreadSuspension();
+}
+
+extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* referrer,
+ uint64_t* new_value, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
+ sizeof(int64_t));
+ if (LIKELY(field != nullptr)) {
+ // Compiled code can't use transactional mode.
+ field->Set64<false>(field->GetDeclaringClass(), *new_value);
+ return 0; // success
+ }
+ field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
+ if (LIKELY(field != nullptr)) {
+ // Compiled code can't use transactional mode.
+ field->Set64<false>(field->GetDeclaringClass(), *new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int8_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ if (type == Primitive::kPrimBoolean) {
+ field->SetBoolean<false>(obj, new_value);
+ } else {
+ DCHECK_EQ(Primitive::kPrimByte, type);
+ field->SetByte<false>(obj, new_value);
+ }
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int16_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ if (type == Primitive::kPrimChar) {
+ field->SetChar<false>(obj, new_value);
+ } else {
+ DCHECK_EQ(Primitive::kPrimShort, type);
+ field->SetShort<false>(obj, new_value);
+ }
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ uint32_t new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int32_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->Set32<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ uint64_t* new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int64_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->Set64<false>(obj, *new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSetObjInstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ mirror::Object* new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::HeapReference<mirror::Object>));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->SetObj<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (UNLIKELY(arr == nullptr)) {
+ ThrowNullPointerExceptionFromInterpreter();
+ return nullptr;
+ }
+ ObjectArray<Object>* array = arr->AsObjectArray<Object>();
+ if (LIKELY(array->CheckIsValidIndex(index))) {
+ return array->GetWithoutChecks(index);
+ } else {
+ return nullptr;
+ }
+}
+
+extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (UNLIKELY(obj == nullptr)) {
+ ThrowNullPointerExceptionFromInterpreter();
+ return nullptr;
+ }
+ return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
new file mode 100644
index 0000000000..90d21e9c89
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
+#define ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
+
+/*
+ * Mterp assembly handler bases
+ */
+extern "C" void* artMterpAsmInstructionStart[];
+extern "C" void* artMterpAsmInstructionEnd[];
+extern "C" void* artMterpAsmAltInstructionStart[];
+extern "C" void* artMterpAsmAltInstructionEnd[];
+
+namespace art {
+namespace interpreter {
+
+void InitMterpTls(Thread* self);
+void CheckMterpAsmConstants();
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
new file mode 100644
index 0000000000..7e7337e5fa
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../interpreter_common.h"
+
+/*
+ * Stub definitions for targets without mterp implementations.
+ */
+
+namespace art {
+namespace interpreter {
+/*
+ * Call this during initialization to verify that the values in asm-constants.h
+ * are still correct.
+ */
+void CheckMterpAsmConstants() {
+ // Dummy version when mterp not implemented.
+}
+
+void InitMterpTls(Thread* self) {
+ self->SetMterpDefaultIBase(nullptr);
+ self->SetMterpCurrentIBase(nullptr);
+ self->SetMterpAltIBase(nullptr);
+}
+
+/*
+ * The platform-specific implementation must provide this.
+ */
+extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result_register)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
+ UNIMPLEMENTED(art::FATAL);
+ return false;
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
new file mode 100644
index 0000000000..2d6f057aa7
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -0,0 +1,12202 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'arm'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: arm/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+  First validate the assembly code by implementing an ExecuteXXXImpl()-style body
+  (doesn't handle invoke; allows higher-level code to create the frame & shadow
+  frame).
+
+  Once that's working, support direct entry code & eliminate the shadow frame
+  (and excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields
+  (dex_pc_, method_ and number_of_vregs_) via negative offsets.  For now, we'll
+  continue the shadow frame mechanism of double-storing object references - via
+  rFP & number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+ r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+#define rREFS r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
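+
+/*
+ * Worked example (the offset values here are hypothetical, for illustration
+ * only): if SHADOWFRAME_VREGS_OFFSET were 44 and SHADOWFRAME_METHOD_OFFSET
+ * were 24, then OFF_FP_METHOD = 24 - 44 = -20, so
+ *     ldr r2, [rFP, #OFF_FP_METHOD]
+ * loads the method_ field from 20 bytes below the vreg base held in rFP.
+ */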
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+.macro EXPORT_DEX_PC tmp
+    ldr  \tmp, [rFP, #OFF_FP_CODE_ITEM]     @ get code_item
+    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]     @ export raw dex pc pointer
+    add  \tmp, #CODEITEM_INSNS_OFFSET       @ point at code_item->insns_[0]
+    sub  \tmp, rPC, \tmp                    @ byte offset of rPC into insns_[]
+    asr  \tmp, #1                           @ convert bytes to 16-bit code units
+    str  \tmp, [rFP, #OFF_FP_DEX_PC]        @ export computed dex pc
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ ldrh rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ add rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ ldrh rINST, [rPC, \reg]!
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [rPC, #((\count)*2+(\byte))]
+.endm
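+
+/*
+ * Example (illustrative): "FETCH_B r2, 1, 0" loads the low byte of the code
+ * unit at rPC+2, i.e. ldrb r2, [rPC, #2]; "FETCH_B r2, 1, 1" loads the high
+ * byte at rPC+3.
+ */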
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+ add pc, rIBASE, \reg, lsl #7
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add pc, \base, \reg, lsl #7
+.endm
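+
+/*
+ * Illustration: every handler below is aligned on a 128-byte boundary
+ * (".balign 128"), so "lsl #7" turns an opcode into a byte offset into the
+ * handler table.  For opcode 0x01 (move), GOTO_OPCODE computes
+ * pc = rIBASE + (0x01 << 7) = rIBASE + 0x80.
+ */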
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [rFP, \vreg, lsl #2]
+ mov \reg, #0
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [rFP, \vreg, lsl #2]
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
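+
+/*
+ * Note the double store above: SET_VREG writes the value to the vreg array
+ * and zeroes the matching slot in the reference array (rREFS), so a
+ * non-object write never leaves a stale reference behind.  SET_VREG_OBJECT
+ * stores the same value to both arrays to keep them in sync.  Example: with
+ * the vreg index in r0, "SET_VREG r1, r0" stores r1 to fp[index] and 0 to
+ * refs[index].
+ */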
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
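+
+/*
+ * rIBASE is reloaded from the thread-local mterp_current_ibase field, which
+ * lets the runtime point a thread at a different handler table (such as the
+ * "alt" table declared in mterp.h) without changing this code; the switching
+ * mechanism itself is an inference from InitMterpTls and the alt-table
+ * declarations, not something spelled out here.
+ */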
+
+/* File: arm/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+
+/*
+ * On entry:
+ *  r0  Thread* self
+ * r1 code_item
+ * r2 ShadowFrame
+ * r3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .fnstart
+ .save {r4-r10,fp,lr}
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+ .pad #4
+    sub     sp, sp, #4                      @ align sp to 64 bits
+
+ /* Remember the return register */
+ str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov rSELF, r0
+ ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET  @ point to vregs[] in the shadow frame
+ add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
+ add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
+ add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST @ load rINST from rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+ /* NOTE: no fallthrough */
+
+
+ .global artMterpAsmInstructionStart
+ .type artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: arm/op_nop.S */
+ FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ GOTO_OPCODE ip @ execute it
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: arm/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: arm/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: arm/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: arm/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 1 @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: arm/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 2 @ r3<- BBBB
+ FETCH r2, 1 @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: arm/op_move_object.S */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: arm/op_move_object_from16.S */
+/* File: arm/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: arm/op_move_object_16.S */
+/* File: arm/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: arm/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JValue.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: arm/op_move_result_wide.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: arm/op_move_result_object.S */
+/* File: arm/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JValue.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: arm/op_move_exception.S */
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: arm/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: arm/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: arm/op_return_wide.S */
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: arm/op_return_object.S */
+/* File: arm/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: arm/op_const_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+ GOTO_OPCODE ip @ execute next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: arm/op_const_16.S */
+ /* const/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: arm/op_const.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r1, 2                         @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: arm/op_const_high16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: arm/op_const_wide_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: arm/op_const_wide_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH r0, 1 @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S r2, 2 @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: arm/op_const_wide.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (low middle)
+ FETCH r2, 3 @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH r3, 4 @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: arm/op_const_wide_high16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: arm/op_const_string.S */
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: arm/op_const_string_jumbo.S */
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r2, 2                         @ r2<- BBBB (high)
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 3 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: arm/op_const_class.S */
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: arm/op_monitor_enter.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artLockObjectFromCode
+ cmp r0, #0
+ bne MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: arm/op_monitor_exit.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+    mov      r1, rSELF                  @ r1<- self
+ bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ bne MterpException
+ FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: arm/op_check_cast.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ GET_VREG r1, r1 @ r1<- object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpCheckCast @ (index, obj, method, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: arm/op_instance_of.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- vB (object)
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ mov r9, rINST, lsr #8 @ r9<- A+
+ and r9, r9, #15 @ r9<- A
+ bl MterpInstanceOf @ (index, obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+ ADVANCE 2 @ advance rPC
+ SET_VREG r0, r9 @ vA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: arm/op_array_length.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
+ GET_INST_OPCODE ip @ extract opcode from rINST
+    SET_VREG r3, r2                     @ vA<- length
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: arm/op_new_instance.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rSELF
+ mov r2, rINST
+ bl MterpNewInstance @ (shadow_frame, self, inst_data)
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: arm/op_new_array.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: arm/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl MterpFilledNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: arm/op_filled_new_array_range.S */
+/* File: arm/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl MterpFilledNewArrayRange
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: arm/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG r0, r3 @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData @ (obj, payload)
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq MterpPossibleException @ exception?
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: arm/op_throw.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r1, r2 @ r1<- vAA (exception object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
+ b MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: arm/op_goto.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ @ If backwards branch refresh rIBASE
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ @ If backwards branch refresh rIBASE
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: arm/op_goto_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: arm/op_goto_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: arm/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: arm/op_sparse_switch.S */
+/* File: arm/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: arm/op_cmpl_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: arm/op_cmpg_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: arm/op_cmpl_double.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: arm/op_cmpg_double.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: arm/op_cmp_long.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .Lop_cmp_long_less @ signed compare on high part
+ bgt .Lop_cmp_long_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .Lop_cmp_long_greater @ unsigned compare on low part
+ bne .Lop_cmp_long_less
+ b .Lop_cmp_long_finish @ equal; r1 already holds 0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: arm/op_if_eq.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: arm/op_if_ne.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: arm/op_if_lt.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: arm/op_if_ge.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: arm/op_if_gt.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: arm/op_if_le.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: arm/op_if_eqz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: arm/op_if_nez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: arm/op_if_ltz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: arm/op_if_gez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: arm/op_if_gtz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: arm/op_if_lez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: arm/op_unused_3e.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: arm/op_unused_3f.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: arm/op_unused_40.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: arm/op_unused_41.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: arm/op_unused_42.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: arm/op_unused_43.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
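+    /*
+     * Rough C sketch of the checks and load below (field names only
+     * approximate the mirror::Array layout implied by the offsets):
+     *   if (array == NULL) goto common_errNullObject;
+     *   if ((uint32_t) index >= array->length) goto common_errArrayIndex;
+     *   vAA = ((int32_t*) array->data)[index];
+     */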
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldr r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: arm/op_aget_wide.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
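+    /*
+     * As op_aget, but one 64-bit load (rough C sketch); LDRD is safe
+     * because wide array elements are 8-byte aligned:
+     *   int64_t val = ((int64_t*) array->data)[index];
+     *   fp[AA] = (uint32_t) val;  fp[AA + 1] = (uint32_t)(val >> 32);
+     */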
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: arm/op_aget_object.S */
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ bl artAGetObjectFromMterp @ (array, index)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0
+ bne MterpException
+ SET_VREG_OBJECT r0, r9
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: arm/op_aget_boolean.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: arm/op_aget_byte.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrsb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: arm/op_aget_char.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: arm/op_aget_short.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrsh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
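+    /*
+     * Mirror image of op_aget (rough C sketch):
+     *   if (array == NULL) goto common_errNullObject;
+     *   if ((uint32_t) index >= array->length) goto common_errArrayIndex;
+     *   ((int32_t*) array->data)[index] = vAA;
+     */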
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: arm/op_aput_wide.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
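+    /*
+     * Rough C sketch: same checks as op_aput, then one 64-bit store via
+     * STRD (wide elements are 8-byte aligned):
+     *   int64_t val = fp[AA] | ((int64_t) fp[AA + 1] << 32);  // LDMIA below
+     *   ((int64_t*) array->data)[index] = val;
+     */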
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: arm/op_aput_object.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpAputObject
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: arm/op_aput_boolean.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: arm/op_aput_byte.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: arm/op_aput_char.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: arm/op_aput_short.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
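+    /*
+     * The work is done by a runtime entrypoint; the register setup below
+     * follows AAPCS for a signature of roughly this shape (sketch; see the
+     * runtime field entrypoints for the authoritative prototype):
+     *   int32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj,
+     *                                    ArtMethod* referrer, Thread* self);
+     * The value returns in r0; a pending exception is checked via rSELF.
+     */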
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet32InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: arm/op_iget_wide.S */
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet64InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpException @ bail out
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: arm/op_iget_object.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetObjInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 1
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: arm/op_iget_boolean.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetBooleanInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: arm/op_iget_byte.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetByteInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: arm/op_iget_char.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetCharInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: arm/op_iget_short.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetShortInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
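+    /*
+     * Argument mapping for the call below, in sketch form (the exact C
+     * prototype lives in the runtime):
+     *   r0 = field ref CCCC, r1 = object (vB), r2 = new value (vA),
+     *   r3 = referrer ArtMethod*; a nonzero return means exception pending.
+     */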
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet32InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: arm/op_iput_wide.S */
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: arm/op_iput_object.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpIputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: arm/op_iput_boolean.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: arm/op_iput_byte.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: arm/op_iput_char.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: arm/op_iput_short.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
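+    /*
+     * Call contract, sketched from the register setup below: r0 = field
+     * ref BBBB, r1 = referrer ArtMethod*, r2 = Thread* self; the loaded
+     * value returns in r0 and is written to fp[AA] if no exception is
+     * pending.
+     */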
+
+ .extern artGet32StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet32StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: arm/op_sget_wide.S */
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field@BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet64StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r9, rINST, lsr #8 @ r9<- AA
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: arm/op_sget_object.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetObjStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetObjStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 1
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: arm/op_sget_boolean.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetBooleanStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: arm/op_sget_byte.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetByteStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetByteStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: arm/op_sget_char.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetCharStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetCharStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: arm/op_sget_short.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetShortStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetShortStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
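+    /*
+     * Call contract, sketched from the register setup below: r0 = field
+     * ref BBBB, r1 = fp[AA] (new value), r2 = referrer ArtMethod*,
+     * r3 = Thread* self; the entrypoint returns 0 on success.
+     */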
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet32StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: arm/op_sput_wide.S */
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: arm/op_sput_object.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpSputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: arm/op_sput_boolean.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: arm/op_sput_byte.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: arm/op_sput_char.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: arm/op_sput_short.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: arm/op_invoke_virtual.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
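+    /*
+     * Every invoke flavor funnels into a C helper of roughly this shape
+     * (sketch; the authoritative prototype is in the mterp C glue):
+     *   bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
+     *                           uint16_t* dex_pc_ptr, uint16_t inst_data);
+     * A false return means an exception is pending; otherwise the 3-unit
+     * instruction is skipped and dispatch continues.
+     */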
+ .extern MterpInvokeVirtual
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtual
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: arm/op_invoke_super.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeSuper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: arm/op_invoke_direct.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeDirect
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: arm/op_invoke_static.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeStatic
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: arm/op_invoke_interface.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeInterface
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: arm/op_return_void_no_barrier.S */
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: arm/op_invoke_virtual_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: arm/op_invoke_super_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeSuperRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: arm/op_invoke_direct_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeDirectRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: arm/op_invoke_static_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeStaticRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: arm/op_invoke_interface_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeInterfaceRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: arm/op_unused_79.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: arm/op_unused_7a.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: arm/op_neg_int.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
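+    /*
+     * Rough C sketch of this instantiation: vA = -vB (the RSB below); the
+     * "optional op" slot lets one skeleton serve every 32-bit unary op.
+     */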
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: arm/op_not_int.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: arm/op_neg_long.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
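+    /*
+     * Rough C sketch: int64_t v = fp_wide[B]; fp_wide[A] = -v; done as
+     * RSBS/RSC so the borrow from the low word carries into the high word.
+     */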
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: arm/op_not_long.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: arm/op_neg_float.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: arm/op_neg_double.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: arm/op_int_to_long.S */
+/* File: arm/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
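+
+ /*
+ * The widening itself is the single "asr #31": the high word of a
+ * sign-extended long is the low word's sign bit replicated 32 times.
+ * Equivalent C (illustrative, hypothetical helper name):
+ *
+ * #include <stdint.h>
+ *
+ * static int64_t int_to_long(int32_t v) {
+ *     return (int64_t)v; // high word == v >> 31, the asr above
+ * }
+ */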
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: arm/op_int_to_float.S */
+/* File: arm/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitos s1, s0 @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: arm/op_int_to_double.S */
+/* File: arm/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitod d0, s0 @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: arm/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: arm/op_long_to_float.S */
+/* File: arm/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for op_move.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: arm/op_long_to_double.S */
+ /*
+ * Specialized 64-bit floating point operation.
+ *
+ * Note: The result will be returned in d2.
+ *
+ * For: long-to-double
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ vldr d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ vcvt.f64.s32 d1, s1 @ d1<- (double)(vBh)
+ vcvt.f64.u32 d2, s0 @ d2<- (double)(vBl)
+ vldr d3, constvalop_long_to_double
+ vmla.f64 d2, d1, d3 @ d2<- vBh*2^32 + vBl
+
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ vstr.64 d2, [r9] @ vA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+ /* literal pool helper */
+constvalop_long_to_double:
+ .8byte 0x41f0000000000000
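+
+ /*
+ * How the reconstruction works: "vldr d0" drops the 64-bit long into
+ * d0, so s0 holds the low word and s1 the high word. The high word is
+ * converted as a signed int, the low word as an unsigned int, and the
+ * constant above is the IEEE-754 double encoding of 2^32 (biased
+ * exponent 0x41f = 1055, 1055 - 1023 = 32). vmla then forms
+ * hi*2^32 + lo. A C sketch of the same computation (illustrative only):
+ *
+ * #include <stdint.h>
+ *
+ * static double long_to_double(int64_t v) {
+ *     int32_t  hi = (int32_t)(v >> 32);
+ *     uint32_t lo = (uint32_t)v;
+ *     // hi * 2^32 is exact in double, so only the final add rounds.
+ *     return (double)hi * 4294967296.0 + (double)lo;
+ * }
+ */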
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: arm/op_float_to_int.S */
+/* File: arm/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizs s1, s0 @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: arm/op_float_to_long.S */
+@include "arm/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: arm/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ bl f2l_doconv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
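+
+ /*
+ * f2l_doconv (a helper emitted elsewhere in this file, not shown in
+ * this hunk) exists because a bare __aeabi_f2lz call does not give the
+ * conversion corner cases Dalvik requires: NaN must become 0 and
+ * out-of-range values must clamp to Long.MIN_VALUE/MAX_VALUE. A C
+ * sketch of those required semantics (not the helper itself):
+ *
+ * #include <stdint.h>
+ *
+ * static int64_t java_f2l(float x) {
+ *     if (x != x) return 0;                // NaN
+ *     if (x >= 0x1p63f) return INT64_MAX;  // >= 2^63 clamps
+ *     if (x < -0x1p63f) return INT64_MIN;
+ *     return (int64_t)x;                   // in range: truncate
+ * }
+ *
+ * The double-to-long handler below leans on d2l_doconv for the same
+ * reason.
+ */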
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: arm/op_float_to_double.S */
+/* File: arm/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtds d0, s0 @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: arm/op_double_to_int.S */
+/* File: arm/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizd s0, d0 @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: arm/op_double_to_long.S */
+@include "arm/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: arm/op_double_to_float.S */
+/* File: arm/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtsd s0, d0 @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: arm/op_int_to_byte.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ sxtb r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: arm/op_int_to_char.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ uxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: arm/op_int_to_short.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ sxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: arm/op_add_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
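+
+ /*
+ * Operand decoding used by all of the vAA,vBB,vCC handlers that
+ * follow: AA sits in the high byte of the opcode unit (rINST), while
+ * "FETCH r0, 1" pulls the second 16-bit code unit, whose low byte is
+ * BB and whose high byte is CC. Illustrative C decode of this 23x
+ * format (hypothetical helper name):
+ *
+ * #include <stdint.h>
+ *
+ * static void decode_23x(const uint16_t *insns,
+ *                        unsigned *aa, unsigned *bb, unsigned *cc) {
+ *     *aa = insns[0] >> 8;    // mov r9, rINST, lsr #8
+ *     *bb = insns[1] & 0xff;  // and r2, r0, #255
+ *     *cc = insns[1] >> 8;    // mov r3, r0, lsr #8
+ * }
+ */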
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: arm/op_sub_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: arm/op_mul_int.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: arm/op_div_int.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
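+
+ /*
+ * The #ifdef resolves at assembly time: with __ARM_ARCH_EXT_IDIV__ the
+ * hardware divider is used, otherwise the EABI helper is called. No
+ * INT_MIN / -1 check is needed either way, since both sdiv and
+ * __aeabi_idiv yield INT_MIN for that case, matching Java semantics.
+ * The same selection in C (illustrative sketch; the divisor has
+ * already been checked against zero):
+ *
+ * #include <stdint.h>
+ *
+ * static int32_t div_int(int32_t a, int32_t b) {
+ * #ifdef __ARM_ARCH_EXT_IDIV__
+ *     int32_t q;
+ *     __asm__("sdiv %0, %1, %2" : "=r"(q) : "r"(a), "r"(b));
+ *     return q;
+ * #else
+ *     extern int32_t __aeabi_idiv(int32_t, int32_t);
+ *     return __aeabi_idiv(a, b);
+ * #endif
+ * }
+ */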
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: arm/op_rem_int.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
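+
+ /*
+ * In the hardware-divide path the remainder is recomposed with mls:
+ * "mls r1, r1, r2, r0" computes r1 = r0 - r1*r2, i.e.
+ * remainder = dividend - divisor*quotient. Illustrative C:
+ *
+ * #include <stdint.h>
+ *
+ * static int32_t rem_int(int32_t a, int32_t b) { // b != 0 checked
+ *     int32_t q = a / b;  // sdiv r2, r0, r1
+ *     return a - q * b;   // mls r1, r1, r2, r0
+ * }
+ */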
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: arm/op_and_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: arm/op_or_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: arm/op_xor_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: arm/op_shl_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: arm/op_shr_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: arm/op_ushr_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: arm/op_add_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: arm/op_sub_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: arm/op_mul_long.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
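+
+ /*
+ * The same splitting in C, for reference (illustrative; using the
+ * W/X/Y/Z names from the comment above, vBB = W:X and vCC = Y:Z):
+ *
+ * #include <stdint.h>
+ *
+ * static uint64_t mul_long(uint64_t wx, uint64_t yz) {
+ *     uint32_t x = (uint32_t)wx, w = (uint32_t)(wx >> 32);
+ *     uint32_t z = (uint32_t)yz, y = (uint32_t)(yz >> 32);
+ *     uint64_t zx = (uint64_t)z * x;                  // umull r9, r10
+ *     uint32_t hi = (uint32_t)(zx >> 32) + z*w + y*x; // mla, then add
+ *     return ((uint64_t)hi << 32) | (uint32_t)zx;
+ * }
+ */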
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: arm/op_div_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
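+
+ /*
+ * Two details worth noting here: the ".if 1" guard tests the 64-bit
+ * divisor for zero with a single "orrs ip, r2, r3" (a long is zero iff
+ * the OR of its halves is zero), and __aeabi_ldivmod returns the
+ * quotient in r0/r1 with the remainder left in r2/r3, which is why
+ * rem-long below reuses the same call and simply stores r2/r3.
+ * Illustrative C (on ARM EABI the compiler emits the same helper call):
+ *
+ * #include <stdint.h>
+ *
+ * static int64_t div_long(int64_t a, int64_t b) { // b != 0 checked
+ *     return a / b; // -> __aeabi_ldivmod, quotient part
+ * }
+ */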
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: arm/op_rem_long.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: arm/op_and_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: arm/op_or_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: arm/op_xor_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: arm/op_shl_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
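+
+ /*
+ * The composed shift relies on ARM register-controlled lsl/lsr giving
+ * 0 for amounts of 32 or more, so the "orr" contributes nothing when
+ * the distance is 0, and "movpl" patches in the high word for
+ * distances of 32..63. C needs explicit guards for what the hardware
+ * gives for free; an illustrative sketch (shr-long and ushr-long below
+ * mirror it with asr/lsr):
+ *
+ * #include <stdint.h>
+ *
+ * static uint64_t shl_long(uint64_t v, unsigned shift) {
+ *     unsigned s = shift & 63; // Dalvik masks to 6 bits
+ *     uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
+ *     if (s >= 32)             // the movpl path
+ *         return (uint64_t)(lo << (s - 32)) << 32;
+ *     if (s == 0)              // C shifts by 32 are undefined
+ *         return v;
+ *     hi = (hi << s) | (lo >> (32 - s));
+ *     return ((uint64_t)hi << 32) | (lo << s);
+ * }
+ */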
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: arm/op_shr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: arm/op_ushr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: arm/op_add_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: arm/op_sub_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: arm/op_mul_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: arm/op_div_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: arm/op_rem_float.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
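+
+ /*
+ * fmodf fits the softfp convention this file uses for rem-float: both
+ * operands arrive in core registers (r0, r1) and the result comes back
+ * in r0, so the generic binop template works unchanged. fmodf also
+ * already matches Java rem-float semantics (result takes the sign of
+ * the dividend, NaN propagates). Illustrative C:
+ *
+ * #include <math.h>
+ *
+ * static float rem_float(float a, float b) { return fmodf(a, b); }
+ *
+ * rem-double below does the same through fmod.
+ */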
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: arm/op_add_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ faddd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: arm/op_sub_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fsubd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: arm/op_mul_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fmuld d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: arm/op_div_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fdivd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: arm/op_rem_double.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: arm/op_add_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: arm/op_sub_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: arm/op_mul_int_2addr.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: arm/op_div_int_2addr.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/2addr
+ *
+ */
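+ /*
+  * Sketch of the selection below (annotation). Note that ARM sdiv yields
+  * INT_MIN for INT_MIN / -1, which matches div-int semantics:
+  *
+  *   #ifdef __ARM_ARCH_EXT_IDIV__
+  *       q = a / b;                  // single sdiv instruction
+  *   #else
+  *       q = __aeabi_idiv(a, b);     // AEABI runtime helper
+  *   #endif
+  */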
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: arm/op_rem_int_2addr.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/2addr
+ *
+ */
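+ /*
+  * Sketch (annotation): on the hardware path, mls derives the remainder
+  * from the quotient without a second division:
+  *
+  *   int32_t q = a / b;        // sdiv r2, r0, r1
+  *   int32_t r = a - q * b;    // mls r1, r1, r2, r0 == r0 - r1*r2
+  */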
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: arm/op_and_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: arm/op_or_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: arm/op_xor_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: arm/op_shl_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: arm/op_shr_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: arm/op_ushr_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: arm/op_add_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
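+ /*
+  * Sketch of the operand addressing (annotation): vregs are 4-byte
+  * slots, so &fp[A] == rFP + (A << 2), and a wide value is the
+  * little-endian register pair loaded by ldmia:
+  *
+  *   uint32_t *pa = &fp[A];                          // add r9, rFP, ...
+  *   uint64_t a = pa[0] | ((uint64_t)pa[1] << 32);   // ldmia r9, {r0-r1}
+  */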
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: arm/op_sub_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: arm/op_mul_long_2addr.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See op_mul_long for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
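+ /*
+  * Sketch of the decomposition (annotation), with vA = aH:aL (r1:r0)
+  * and vB = bH:bL (r3:r2):
+  *
+  *   res.lo = low32(aL * bL);                    // umull r9, r10, r2, r0
+  *   res.hi = high32(aL * bL) + aL*bH + aH*bL;   // mul/mla/add
+  */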
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: arm/op_div_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: arm/op_rem_long_2addr.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
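+/*
+ * Sketch (annotation): per the ARM EABI the helper behaves like
+ *
+ *   { int64_t quot, rem; } __aeabi_ldivmod(int64_t n, int64_t d);
+ *
+ * with the quotient returned in r0/r1 and the remainder in r2/r3, so
+ * this handler stores r2/r3 where div-long/2addr stores r0/r1.
+ */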
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: arm/op_and_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: arm/op_or_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: arm/op_xor_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: arm/op_shl_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
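+ /*
+  * Sketch of the composite 64-bit shift (annotation), for n = vB & 63;
+  * register-specified ARM shifts by 32..255 yield 0, which covers the
+  * n == 0 and n >= 32 edges of the C form:
+  *
+  *   if (n < 32) { hi = (hi << n) | (lo >> (32 - n)); lo <<= n; }
+  *   else        { hi = lo << (n - 32);               lo = 0;   }
+  *
+  * The subs/movpl pair below selects the n >= 32 case.
+  */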
+ /* shl-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: arm/op_shr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm/op_ushr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: arm/op_add_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
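+ /*
+  * Sketch (annotation): the VFP form operates on the vreg array in
+  * place through s0/s1:
+  *
+  *   float *pa = &fp[A], *pb = &fp[B];   // VREG_INDEX_TO_ADDR
+  *   *pa = *pa op *pb;                   // flds / <op>s / fsts
+  */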
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: arm/op_sub_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: arm/op_mul_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: arm/op_div_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: arm/op_rem_float_2addr.S */
+/* EABI doesn't define a float remainder function, but libm does */
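+/*
+ * Sketch (annotation): fmodf matches rem-float semantics, truncated
+ * division with the result taking the sign of the dividend:
+ *
+ *   float r = a - truncf(a / b) * b;   // conceptually what fmodf returns
+ */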
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: arm/op_add_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ faddd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: arm/op_sub_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fsubd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: arm/op_mul_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fmuld d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: arm/op_div_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fdivd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: arm/op_rem_double_2addr.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: arm/op_add_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
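+ /*
+  * Sketch of the lit16 decode (annotation): the second code unit is the
+  * literal, sign-extended to 32 bits by FETCH_S:
+  *
+  *   int32_t lit = (int16_t)insns[rPC + 1];   // ssssCCCC
+  *   fp[A] = fp[B] op lit;
+  */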
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: arm/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
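+/*
+ * Sketch (annotation): the operands are reversed relative to sub-int,
+ * hence the rsb below:
+ *
+ *   fp[A] = lit - fp[B];   // rsb r0, r0, r1 == r0 = r1 - r0
+ */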
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: arm/op_mul_int_lit16.S */
+/* must be "mul r0, r1, r0" -- "mul r0, r0, r1" (Rd == Rm) is unpredictable on pre-ARMv6 */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: arm/op_div_int_lit16.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: arm/op_rem_int_lit16.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: arm/op_and_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: arm/op_or_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: arm/op_xor_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: arm/op_add_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
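+ /*
+  * Sketch of the lit8 decode (annotation): one code unit packs CC|BB,
+  * and a single arithmetic shift both extracts and sign-extends the
+  * literal; done with movs, it also sets the Z flag that the chkzero
+  * variants branch on:
+  *
+  *   int32_t unit = (int16_t)insns[rPC + 1];   // ssssCCBB
+  *   uint32_t BB  = unit & 0xff;
+  *   int32_t  CC  = unit >> 8;                 // movs ..., asr #8
+  *   fp[AA] = fp[BB] op CC;
+  */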
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm/op_rsub_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: arm/op_mul_int_lit8.S */
+/* must be "mul r0, r1, r0" -- "mul r0, r0, r1" (Rd == Rm) is unpredictable on pre-ARMv6 */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: arm/op_div_int_lit8.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended); sets Z flag
+ @cmp r1, #0 @ not needed: Z flag already set by movs above
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: arm/op_rem_int_lit8.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended); sets Z flag
+ @cmp r1, #0 @ not needed: Z flag already set by movs above
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: arm/op_and_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: arm/op_or_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: arm/op_xor_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: arm/op_shl_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: arm/op_shr_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm/op_ushr_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
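+/*
+ * Illustrative C equivalent (hypothetical names): a quickened iget skips
+ * field resolution entirely and loads through the cached byte offset:
+ *
+ *     if (fp[B] == NULL) goto common_errNullObject;
+ *     fp[A] = *(int32_t *)((char *)fp[B] + CCCC);
+ */
+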
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: arm/op_iget_wide_quick.S */
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH ip, 1 @ ip<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: arm/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ cmp r0, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
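+/*
+ * The object variant cannot do a raw load like the handlers above: it
+ * calls out to artIGetObjectFromMterp, giving the runtime a chance to
+ * apply a read barrier, then checks for a pending exception before
+ * committing the result with SET_VREG_OBJECT. Sketch (illustrative C,
+ * hypothetical names):
+ *
+ *     if (fp[B] == NULL) goto common_errNullObject;
+ *     ref = artIGetObjectFromMterp(fp[B], CCCC);
+ *     if (self->exception != NULL) goto MterpPossibleException;
+ *     fp[A] = ref;                          // reference-aware store
+ */
+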
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: arm/op_iput_quick.S */
+    /* For: iput-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
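+/*
+ * Illustrative C equivalent (hypothetical names), mirroring the iget
+ * sketch above but in the store direction:
+ *
+ *     if (fp[B] == NULL) goto common_errNullObject;
+ *     *(int32_t *)((char *)fp[B] + CCCC) = fp[A];
+ *
+ * The boolean/byte/char/short variants further below differ only in
+ * using strb/strh for the store width.
+ */
+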
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: arm/op_iput_wide_quick.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r3, 1 @ r3<- field byte offset
+ GET_VREG r2, r2 @ r2<- fp[B], the object pointer
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ cmp r2, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: arm/op_iput_object_quick.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpIputObjectQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm/op_invoke_virtual_quick.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
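+/*
+ * All invoke stubs share this wrapper shape: the actual dispatch lives
+ * in a MterpInvoke* C++ helper, and the assembly only tests the result
+ * and steps over the 3-code-unit instruction. Roughly (illustrative C):
+ *
+ *     if (!MterpInvokeVirtualQuick(self, shadow_frame, pc, inst))
+ *         goto MterpException;
+ *     pc += 3;                              // FETCH_ADVANCE_INST 3
+ */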
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm/op_invoke_virtual_range_quick.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualQuickRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: arm/op_iput_boolean_quick.S */
+/* File: arm/op_iput_quick.S */
+    /* For: iput-boolean-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strb r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: arm/op_iput_byte_quick.S */
+/* File: arm/op_iput_quick.S */
+    /* For: iput-byte-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strb r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: arm/op_iput_char_quick.S */
+/* File: arm/op_iput_quick.S */
+    /* For: iput-char-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strh r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: arm/op_iput_short_quick.S */
+/* File: arm/op_iput_quick.S */
+    /* For: iput-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strh r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: arm/op_iget_boolean_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrb r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: arm/op_iget_byte_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrsb r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: arm/op_iget_char_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrh r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: arm/op_iget_short_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrsh r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: arm/op_unused_f4.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: arm/op_unused_fa.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: arm/op_unused_fb.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: arm/op_unused_fc.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: arm/op_unused_fd.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: arm/op_unused_fe.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: arm/op_unused_ff.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+ .balign 128
+ .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global artMterpAsmSisterStart
+ .type artMterpAsmSisterStart, %function
+ .text
+ .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_cmp_long */
+
+.Lop_cmp_long_less:
+ mvn r1, #0 @ r1<- -1
+    @ Want to conditionally execute the next mov so we can avoid the branch,
+    @ but don't see how; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.Lop_cmp_long_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.Lop_cmp_long_finish:
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
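+/*
+ * Together with the primary op_cmp_long handler, these tails implement
+ * (illustrative C, with vBB/vCC read as 64-bit signed values):
+ *
+ *     fp[AA] = (vBB < vCC) ? -1 : ((vBB > vCC) ? 1 : 0);
+ */
+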
+/* continuation for op_float_to_long */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+    movne   r0, #0                      @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
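+/*
+ * Equivalent clamping logic in C (illustrative sketch; the __aeabi_*
+ * routines above are the EABI soft-float helpers this expands to):
+ *
+ *     int64_t f2l(float f) {
+ *         if (f >= 0x1p63f)  return INT64_MAX;  // arg >= (float)maxlong
+ *         if (f <= -0x1p63f) return INT64_MIN;  // arg <= (float)minlong
+ *         if (f != f)        return 0;          // NaN
+ *         return (int64_t) f;                   // __aeabi_f2lz
+ *     }
+ *
+ * d2l_doconv below is the same pattern widened to double.
+ */
+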
+/* continuation for op_double_to_long */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+ .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
+ .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+ .global artMterpAsmAltInstructionStart
+ .type artMterpAsmAltInstructionStart, %function
+ .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (0 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
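+/*
+ * How the alt table works (illustrative): when instrumentation needs a
+ * hook, rIBASE is switched to this table, so every opcode first lands in
+ * one of these stubs. Each stub preloads lr with the address of the
+ * matching primary handler (artMterpAsmInstructionStart + opcode * 128)
+ * and tail-calls MterpCheckBefore(self, shadow_frame); when that call
+ * returns through lr, execution continues in the real handler. The same
+ * stub body repeats below, once per opcode, differing only in the
+ * handler offset.
+ */
+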
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (1 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (2 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (3 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (4 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (5 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (6 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (7 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (8 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (9 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (10 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (11 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (12 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (13 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (14 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (15 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (16 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (17 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (18 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (19 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (20 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (21 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (22 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (23 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (24 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (25 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (26 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (27 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (28 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (29 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (30 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (31 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (32 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (33 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (34 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (35 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (36 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (37 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (38 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (39 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (40 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (41 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (42 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (43 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (44 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (45 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (46 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (47 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (48 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (49 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (50 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (51 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (52 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (53 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (54 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (55 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (56 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (57 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (58 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (59 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (60 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (61 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (62 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (63 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (64 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (65 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (66 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (67 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
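+/*
+ * Note that unused opcodes (0x3e-0x43 above) still get full alt stubs:
+ * keeping every 128-byte slot populated preserves the opcode * 128
+ * indexing of the table, and an unused opcode taken through the alt
+ * path still funnels through MterpCheckBefore and then into the
+ * corresponding primary handler, which presumably reports the bad
+ * opcode.
+ */
+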
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (68 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (69 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (70 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (71 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (72 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (73 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (74 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (75 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (76 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (77 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (78 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (79 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (80 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (81 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (82 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (83 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (84 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (85 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (86 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (87 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (88 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (89 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (90 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (91 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (92 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (93 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (94 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (95 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (96 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
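+/*
+ * Worked example of the handler-address arithmetic: for sget (opcode
+ * 0x60 = 96) the adrl above resolves to artMterpAsmInstructionStart +
+ * (96 * 128) = artMterpAsmInstructionStart + 12288 (0x3000), i.e. the
+ * 128-byte slot at index 96 (zero-based) in the primary handler table.
+ */
+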
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (97 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (98 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (99 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (100 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (101 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (102 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (103 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (104 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (105 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (106 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (107 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (108 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (109 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (110 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (111 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (112 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (113 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (114 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (115 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (116 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (117 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (118 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (119 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (120 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (121 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (122 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (123 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (124 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (125 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (126 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (127 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (128 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (129 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (130 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (131 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (132 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (133 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (134 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (135 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (136 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (137 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (138 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (139 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (140 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (141 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (142 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (143 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (144 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (145 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (146 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (147 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (148 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (149 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (150 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (151 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (152 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (153 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (154 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (155 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (156 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (157 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (158 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (159 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (160 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (161 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (162 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (163 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (164 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (165 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (166 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (167 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (168 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (169 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (170 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (171 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (172 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (173 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (174 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (175 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (176 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (177 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (178 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (179 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (180 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (181 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (182 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (183 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (184 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (185 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (186 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (187 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (188 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (189 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (190 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (191 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (192 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (193 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (194 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (195 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (196 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (197 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (198 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (199 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (200 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (201 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (202 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (203 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (204 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (205 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (206 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (207 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (208 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (209 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (210 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (211 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (212 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (213 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (214 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (215 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (216 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (217 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (218 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (219 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (220 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (221 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (222 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (223 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (224 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (225 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (226 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (227 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (228 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (229 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (230 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (231 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (232 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (233 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (234 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (235 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (236 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (237 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (238 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (239 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (240 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (241 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (242 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (243 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (244 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (245 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (246 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (247 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (248 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (249 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (250 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (251 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (252 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (253 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (254 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (255 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+ .balign 128
+ .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+/* File: arm/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ cmp r0, #0 @ Exception pending?
+ beq MterpFallback @ If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+MterpException:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException @ (self, shadow_frame)
+ cmp r0, #0
+ beq MterpExceptionReturn @ no local catch, back to caller.
+ ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r1, [rFP, #OFF_FP_DEX_PC]
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add rPC, r0, #CODEITEM_INSNS_OFFSET
+ add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a pending suspend request. Assumes rINST is already loaded and rPC has
+ * advanced; the thread flags are in lr. We still need to fetch the opcode and branch to it.
+ */
+MterpCheckSuspendAndContinue:
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ EXPORT_PC
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov r0, #0 @ signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov r0, #1 @ signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ str r0, [r2]
+ str r1, [r2, #4]
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ mov r0, #1 @ signal return to caller.
+MterpDone:
+ add sp, sp, #4 @ un-align 64
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+ .fnend
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+
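Editorial note on the generated layout above: every mterp handler, primary and alt, occupies a 128-byte slot (hence the repeated ".balign 128" directives), so the alt stub for opcode N locates its primary handler at artMterpAsmInstructionStart + N * 128 before tail-calling MterpCheckBefore. A minimal C++ sketch of that address arithmetic, assuming only the symbol exported by the generated assembly (kMterpHandlerSize is an assumption matching the alignment directives):

    // Sketch only: mirrors the "adrl lr, artMterpAsmInstructionStart + (N * 128)"
    // computation each alt stub performs above.
    #include <cstddef>
    #include <cstdint>

    extern "C" const char artMterpAsmInstructionStart[];  // From the generated assembly.

    constexpr std::size_t kMterpHandlerSize = 128;  // Matches the ".balign 128" slots.

    // Returns the address of the primary handler for the given Dex opcode.
    inline const void* PrimaryHandlerForOpcode(std::uint8_t opcode) {
      return artMterpAsmInstructionStart + std::size_t{opcode} * kMterpHandlerSize;
    }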
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
new file mode 100755
index 0000000000..a325fff93e
--- /dev/null
+++ b/runtime/interpreter/mterp/rebuild.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Rebuild for all known targets. Necessary until the contents of "out" are
+# generated as part of the build.
+#
+set -e
+
+# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
+for arch in arm; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 3c2898b8ac..f08a1a9d90 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -16,6 +16,13 @@
#include "debugger_interface.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "thread-inl.h"
+#include "thread.h"
+
+#include <unordered_map>
+
namespace art {
// -------------------------------------------------------------------
@@ -57,13 +64,19 @@ extern "C" {
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
}
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size) {
+static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+
+static JITCodeEntry* CreateJITCodeEntryInternal(
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size)
+ REQUIRES(g_jit_debug_mutex) {
+ DCHECK(symfile_addr.get() != nullptr);
+
JITCodeEntry* entry = new JITCodeEntry;
- entry->symfile_addr_ = symfile_addr;
+ entry->symfile_addr_ = symfile_addr.release();
entry->symfile_size_ = symfile_size;
entry->prev_ = nullptr;
- // TODO: Do we need a lock here?
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != nullptr) {
entry->next_->prev_ = entry;
@@ -76,8 +89,7 @@ JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_
return entry;
}
-void DeleteJITCodeEntry(JITCodeEntry* entry) {
- // TODO: Do we need a lock here?
+static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug_mutex) {
if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
} else {
@@ -91,7 +103,48 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
__jit_debug_register_code();
+ delete[] entry->symfile_addr_;
delete entry;
}
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ return CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+}
+
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ DeleteJITCodeEntryInternal(entry);
+}
+
+// Mapping from address to entry. It takes ownership of the entries
+// so that the user of the JIT interface does not have to store them.
+static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
+
+void CreateJITCodeEntryForAddress(uintptr_t address,
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ DCHECK_NE(address, 0u);
+ DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
+ JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+ g_jit_code_entries.emplace(address, entry);
+}
+
+bool DeleteJITCodeEntryForAddress(uintptr_t address) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ const auto& it = g_jit_code_entries.find(address);
+ if (it == g_jit_code_entries.end()) {
+ return false;
+ }
+ DeleteJITCodeEntryInternal(it->second);
+ g_jit_code_entries.erase(it);
+ return true;
+}
+
} // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index a784ef5990..74469a98d0 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
#include <inttypes.h>
+#include <memory>
namespace art {
@@ -26,11 +27,25 @@ extern "C" {
}
// Notify native debugger about new JITed code by passing in-memory ELF.
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size);
+// It takes ownership of the in-memory ELF file.
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size);
// Notify native debugger that JITed code has been removed.
+// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry);
+// Notify native debugger about new JITed code by passing in-memory ELF.
+// The address is used only to uniquely identify the entry.
+// It takes ownership of the in-memory ELF file.
+void CreateJITCodeEntryForAddress(uintptr_t address,
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size);
+
+// Notify native debugger that JITed code has been removed.
+// Returns false if entry for the given address was not found.
+bool DeleteJITCodeEntryForAddress(uintptr_t address);
+
} // namespace art
#endif // ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
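The address-keyed variants declared above own both the JITCodeEntry and the in-memory ELF buffer, so a caller only needs to remember the code address. A hedged usage sketch under the new ownership rules (the include path and buffer handling are assumptions, not part of this change):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    #include "jit/debugger_interface.h"  // Assumed in-tree include path.

    void NotifyDebuggerOfJitCode(uintptr_t code_address, const uint8_t* elf, size_t elf_size) {
      // The interface takes ownership of the symfile, so hand it a freshly allocated copy.
      std::unique_ptr<uint8_t[]> copy(new uint8_t[elf_size]);
      std::memcpy(copy.get(), elf, elf_size);
      art::CreateJITCodeEntryForAddress(
          code_address, std::unique_ptr<const uint8_t[]>(copy.release()), elf_size);
    }

    void ForgetJitCode(uintptr_t code_address) {
      // Unregisters the entry and frees its ELF buffer; returns false if not found.
      bool found = art::DeleteJITCodeEntryForAddress(code_address);
      (void)found;
    }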
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ab70f4c158..05668a97b3 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -64,10 +64,14 @@ void Jit::AddTimingLogger(const TimingLogger& logger) {
cumulative_timings_.AddLogger(logger);
}
-Jit::Jit()
- : jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
- jit_compile_method_(nullptr), dump_info_on_shutdown_(false),
- cumulative_timings_("JIT timings"), save_profiling_info_(false) {
+Jit::Jit() : jit_library_handle_(nullptr),
+ jit_compiler_handle_(nullptr),
+ jit_load_(nullptr),
+ jit_compile_method_(nullptr),
+ dump_info_on_shutdown_(false),
+ cumulative_timings_("JIT timings"),
+ save_profiling_info_(false),
+ generate_debug_info_(false) {
}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
@@ -77,7 +81,10 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
return nullptr;
}
jit->code_cache_.reset(JitCodeCache::Create(
- options->GetCodeCacheInitialCapacity(), options->GetCodeCacheMaxCapacity(), error_msg));
+ options->GetCodeCacheInitialCapacity(),
+ options->GetCodeCacheMaxCapacity(),
+ jit->generate_debug_info_,
+ error_msg));
if (jit->GetCodeCache() == nullptr) {
return nullptr;
}
@@ -99,7 +106,7 @@ bool Jit::LoadCompiler(std::string* error_msg) {
*error_msg = oss.str();
return false;
}
- jit_load_ = reinterpret_cast<void* (*)(CompilerCallbacks**)>(
+ jit_load_ = reinterpret_cast<void* (*)(CompilerCallbacks**, bool*)>(
dlsym(jit_library_handle_, "jit_load"));
if (jit_load_ == nullptr) {
dlclose(jit_library_handle_);
@@ -121,9 +128,10 @@ bool Jit::LoadCompiler(std::string* error_msg) {
return false;
}
CompilerCallbacks* callbacks = nullptr;
+ bool will_generate_debug_symbols = false;
VLOG(jit) << "Calling JitLoad interpreter_only="
<< Runtime::Current()->GetInstrumentation()->InterpretOnly();
- jit_compiler_handle_ = (jit_load_)(&callbacks);
+ jit_compiler_handle_ = (jit_load_)(&callbacks, &will_generate_debug_symbols);
if (jit_compiler_handle_ == nullptr) {
dlclose(jit_library_handle_);
*error_msg = "JIT couldn't load compiler";
@@ -136,6 +144,7 @@ bool Jit::LoadCompiler(std::string* error_msg) {
return false;
}
compiler_callbacks_ = callbacks;
+ generate_debug_info_ = will_generate_debug_symbols;
return true;
}
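The dlsym'd jit_load signature above now carries a second out-parameter through which the compiler plugin reports whether it will generate debug symbols. A hedged sketch of the plugin-side entry point it implies (the body is illustrative only, not the actual jit_compiler.cc implementation from this change):

    namespace art { class CompilerCallbacks; }  // Forward declaration for the sketch.

    // Must match the runtime's "void* (*)(CompilerCallbacks**, bool*)" expectation.
    extern "C" void* jit_load(art::CompilerCallbacks** callbacks, bool* generate_debug_info) {
      *callbacks = nullptr;          // Real code hands back the compiler's callbacks.
      *generate_debug_info = false;  // True when the compiler will emit debug symbols.
      return nullptr;                // Real code returns an opaque compiler handle.
    }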
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 0edce2fa49..42bbbe73c7 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -86,7 +86,7 @@ class Jit {
// JIT compiler
void* jit_library_handle_;
void* jit_compiler_handle_;
- void* (*jit_load_)(CompilerCallbacks**);
+ void* (*jit_load_)(CompilerCallbacks**, bool*);
void (*jit_unload_)(void*);
bool (*jit_compile_method_)(void*, ArtMethod*, Thread*);
@@ -99,6 +99,7 @@ class Jit {
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
bool save_profiling_info_;
+ bool generate_debug_info_;
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index c260ca4629..5db699b7fc 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
+#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
@@ -48,8 +49,16 @@ static constexpr int kProtCode = PROT_READ | PROT_EXEC;
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
+ bool generate_debug_info,
std::string* error_msg) {
CHECK_GE(max_capacity, initial_capacity);
+
+ // Generating debug information is mostly for using the 'perf' tool, which does
+ // not work with ashmem.
+ bool use_ashmem = !generate_debug_info;
+ // With 'perf', we want a 1-1 mapping between an address and a method.
+ bool garbage_collect_code = !generate_debug_info;
+
// We need to have 32 bit offsets from method headers in code cache which point to things
// in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
// Ensure we're below 1 GB to be safe.
@@ -64,7 +73,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
MemMap* data_map = MemMap::MapAnonymous(
- "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str);
+ "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str, use_ashmem);
if (data_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -83,7 +92,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
DCHECK_EQ(code_size + data_size, max_capacity);
uint8_t* divider = data_map->Begin() + data_size;
- MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
+ MemMap* code_map =
+ data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
if (code_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
@@ -94,14 +104,16 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
data_size = initial_capacity / 2;
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
- return new JitCodeCache(code_map, data_map, code_size, data_size, max_capacity);
+ return new JitCodeCache(
+ code_map, data_map, code_size, data_size, max_capacity, garbage_collect_code);
}
JitCodeCache::JitCodeCache(MemMap* code_map,
MemMap* data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
- size_t max_capacity)
+ size_t max_capacity,
+ bool garbage_collect_code)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache variable", lock_),
collection_in_progress_(false),
@@ -112,8 +124,10 @@ JitCodeCache::JitCodeCache(MemMap* code_map,
code_end_(initial_code_capacity),
data_end_(initial_data_capacity),
has_done_one_collection_(false),
- last_update_time_ns_(0) {
+ last_update_time_ns_(0),
+ garbage_collect_code_(garbage_collect_code) {
+ DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
@@ -215,6 +229,9 @@ void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UN
uintptr_t allocation = FromCodeToAllocation(code_ptr);
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
const uint8_t* data = method_header->GetNativeGcMap();
+ // Notify native debugger that we are about to remove the code.
+ // It does nothing if we are not using a native debugger.
+ DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
if (data != nullptr) {
mspace_free(data_mspace_, const_cast<uint8_t*>(data));
}
@@ -512,7 +529,11 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
// we hold the lock.
{
MutexLock mu(self, lock_);
- if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
+ if (!garbage_collect_code_) {
+ IncreaseCodeCacheCapacity();
+ NotifyCollectionDone(self);
+ return;
+ } else if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
has_done_one_collection_ = false;
NotifyCollectionDone(self);
return;
@@ -726,5 +747,10 @@ void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSE
info->SetIsMethodBeingCompiled(false);
}
+size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
+ MutexLock mu(Thread::Current(), lock_);
+ return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
+}
+
} // namespace jit
} // namespace art
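To make the new flag concrete, here is a hedged sketch of a call site under the widened Create() signature; the capacities are illustrative, and KB/MB are assumed to be the runtime's globals.h constants:

    // Sketch: a cache set up for 'perf'-friendly JIT debugging. With
    // generate_debug_info == true the cache avoids ashmem and never collects
    // code, preserving a stable 1-1 mapping from address to method.
    std::string error_msg;
    std::unique_ptr<art::jit::JitCodeCache> cache(
        art::jit::JitCodeCache::Create(/* initial_capacity */ 64 * KB,
                                       /* max_capacity */ 16 * MB,
                                       /* generate_debug_info */ true,
                                       &error_msg));
    if (cache == nullptr) {
      LOG(ERROR) << "Could not create JIT code cache: " << error_msg;
    }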
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 1c842e4aa7..a152bcd2d4 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -53,7 +53,10 @@ class JitCodeCache {
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
- static JitCodeCache* Create(size_t initial_capacity, size_t max_capacity, std::string* error_msg);
+ static JitCodeCache* Create(size_t initial_capacity,
+ size_t max_capacity,
+ bool generate_debug_info,
+ std::string* error_msg);
// Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!lock_);
@@ -159,13 +162,16 @@ class JitCodeCache {
return current_capacity_;
}
+ size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
MemMap* data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
- size_t max_capacity);
+ size_t max_capacity,
+ bool garbage_collect_code);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -252,6 +258,9 @@ class JitCodeCache {
// It is atomic to avoid locking when reading it.
Atomic<uint64_t> last_update_time_ns_;
+ // Whether we can garbage collect JIT-compiled code in this cache.
+ const bool garbage_collect_code_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 5dc0e45234..a132701796 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -30,38 +30,40 @@
namespace art {
-void OfflineProfilingInfo::SaveProfilingInfo(const std::string& filename,
- const std::vector<ArtMethod*>& methods) {
+bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
+ const std::vector<ArtMethod*>& methods) {
if (methods.empty()) {
VLOG(profiler) << "No info to save to " << filename;
- return;
+ return true;
}
- DexFileToMethodsMap info;
+ ProfileCompilationInfo info;
+ if (!info.Load(filename)) {
+ LOG(WARNING) << "Could not load previous profile data from file " << filename;
+ return false;
+ }
{
ScopedObjectAccess soa(Thread::Current());
for (auto it = methods.begin(); it != methods.end(); it++) {
- AddMethodInfo(*it, &info);
+ const DexFile* dex_file = (*it)->GetDexFile();
+ if (!info.AddData(dex_file->GetLocation(),
+ dex_file->GetLocationChecksum(),
+ (*it)->GetDexMethodIndex())) {
+ return false;
+ }
}
}
// This doesn't need locking because we are trying to lock the file for exclusive
// access and fail immediately if we can't.
- if (Serialize(filename, info)) {
+ bool result = info.Save(filename);
+ if (result) {
VLOG(profiler) << "Successfully saved profile info to " << filename
<< " Size: " << GetFileSizeBytes(filename);
+ } else {
+ VLOG(profiler) << "Failed to save profile info to " << filename;
}
-}
-
-void OfflineProfilingInfo::AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info) {
- DCHECK(method != nullptr);
- const DexFile* dex_file = method->GetDexFile();
-
- auto info_it = info->find(dex_file);
- if (info_it == info->end()) {
- info_it = info->Put(dex_file, std::set<uint32_t>());
- }
- info_it->second.insert(method->GetDexMethodIndex());
+ return result;
}
enum OpenMode {
@@ -77,9 +79,7 @@ static int OpenFile(const std::string& filename, OpenMode open_mode) {
break;
case READ_WRITE:
// TODO(calin) allow the shared uid of the app to access the file.
- fd = open(filename.c_str(),
- O_CREAT | O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC,
- S_IRUSR | S_IWUSR);
+ fd = open(filename.c_str(), O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
break;
}
@@ -137,8 +137,7 @@ static constexpr const char kLineSeparator = '\n';
* /system/priv-app/app/app.apk,131232145,11,23,454,54
* /system/priv-app/app/app.apk:classes5.dex,218490184,39,13,49,1
**/
-bool OfflineProfilingInfo::Serialize(const std::string& filename,
- const DexFileToMethodsMap& info) const {
+bool ProfileCompilationInfo::Save(const std::string& filename) {
int fd = OpenFile(filename, READ_WRITE);
if (fd == -1) {
return false;
@@ -148,14 +147,12 @@ bool OfflineProfilingInfo::Serialize(const std::string& filename,
// TODO(calin): Profile this and see how much memory it takes. If too much,
// write to file directly.
std::ostringstream os;
- for (auto it : info) {
- const DexFile* dex_file = it.first;
- const std::set<uint32_t>& method_dex_ids = it.second;
-
- os << dex_file->GetLocation()
- << kFieldSeparator
- << dex_file->GetLocationChecksum();
- for (auto method_it : method_dex_ids) {
+ for (const auto& it : info_) {
+ const std::string& dex_location = it.first;
+ const DexFileData& dex_data = it.second;
+
+ os << dex_location << kFieldSeparator << dex_data.checksum;
+ for (auto method_it : dex_data.method_set) {
os << kFieldSeparator << method_it;
}
os << kLineSeparator;
@@ -190,8 +187,22 @@ static void SplitString(const std::string& s, char separator, std::vector<std::s
}
}
-bool ProfileCompilationInfo::ProcessLine(const std::string& line,
- const std::vector<const DexFile*>& dex_files) {
+bool ProfileCompilationInfo::AddData(const std::string& dex_location,
+ uint32_t checksum,
+ uint16_t method_idx) {
+ auto info_it = info_.find(dex_location);
+ if (info_it == info_.end()) {
+ info_it = info_.Put(dex_location, DexFileData(checksum));
+ }
+ if (info_it->second.checksum != checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+ return false;
+ }
+ info_it->second.method_set.insert(method_idx);
+ return true;
+}
+
+bool ProfileCompilationInfo::ProcessLine(const std::string& line) {
std::vector<std::string> parts;
SplitString(line, kFieldSeparator, &parts);
if (parts.size() < 3) {
@@ -205,39 +216,13 @@ bool ProfileCompilationInfo::ProcessLine(const std::string& line,
return false;
}
- const DexFile* current_dex_file = nullptr;
- for (auto dex_file : dex_files) {
- if (dex_file->GetLocation() == dex_location) {
- if (checksum != dex_file->GetLocationChecksum()) {
- LOG(WARNING) << "Checksum mismatch for "
- << dex_file->GetLocation() << " when parsing " << filename_;
- return false;
- }
- current_dex_file = dex_file;
- break;
- }
- }
- if (current_dex_file == nullptr) {
- return true;
- }
-
for (size_t i = 2; i < parts.size(); i++) {
uint32_t method_idx;
if (!ParseInt(parts[i].c_str(), &method_idx)) {
LOG(WARNING) << "Cannot parse method_idx " << parts[i];
return false;
}
- uint16_t class_idx = current_dex_file->GetMethodId(method_idx).class_idx_;
- auto info_it = info_.find(current_dex_file);
- if (info_it == info_.end()) {
- info_it = info_.Put(current_dex_file, ClassToMethodsMap());
- }
- ClassToMethodsMap& class_map = info_it->second;
- auto class_it = class_map.find(class_idx);
- if (class_it == class_map.end()) {
- class_it = class_map.Put(class_idx, std::set<uint32_t>());
- }
- class_it->second.insert(method_idx);
+ AddData(dex_location, checksum, method_idx);
}
return true;
}
@@ -264,25 +249,8 @@ static int GetLineFromBuffer(char* buffer, int n, int start_from, std::string& l
return new_line_pos == -1 ? new_line_pos : new_line_pos + 1;
}
-bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) {
- if (dex_files.empty()) {
- return true;
- }
- if (kIsDebugBuild) {
- // In debug builds verify that the locations are unique.
- std::set<std::string> locations;
- for (auto dex_file : dex_files) {
- const std::string& location = dex_file->GetLocation();
- DCHECK(locations.find(location) == locations.end())
- << "DexFiles appear to belong to different apks."
- << " There are multiple dex files with the same location: "
- << location;
- locations.insert(location);
- }
- }
- info_.clear();
-
- int fd = OpenFile(filename_, READ);
+bool ProfileCompilationInfo::Load(const std::string& filename) {
+ int fd = OpenFile(filename, READ);
if (fd == -1) {
return false;
}
@@ -295,7 +263,7 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files)
while (success) {
int n = read(fd, buffer, kBufferSize);
if (n < 0) {
- PLOG(WARNING) << "Error when reading profile file " << filename_;
+ PLOG(WARNING) << "Error when reading profile file " << filename;
success = false;
break;
} else if (n == 0) {
@@ -309,7 +277,7 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files)
if (current_start_pos == -1) {
break;
}
- if (!ProcessLine(current_line, dex_files)) {
+ if (!ProcessLine(current_line)) {
success = false;
break;
}
@@ -320,25 +288,50 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files)
if (!success) {
info_.clear();
}
- return CloseDescriptorForFile(fd, filename_) && success;
+ return CloseDescriptorForFile(fd, filename) && success;
+}
+
+bool ProfileCompilationInfo::Load(const ProfileCompilationInfo& other) {
+ for (const auto& other_it : other.info_) {
+ const std::string& other_dex_location = other_it.first;
+ const DexFileData& other_dex_data = other_it.second;
+
+ auto info_it = info_.find(other_dex_location);
+ if (info_it == info_.end()) {
+ info_it = info_.Put(other_dex_location, DexFileData(other_dex_data.checksum));
+ }
+ if (info_it->second.checksum != other_dex_data.checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << other_dex_location;
+ return false;
+ }
+ info_it->second.method_set.insert(other_dex_data.method_set.begin(),
+ other_dex_data.method_set.end());
+ }
+ return true;
}
bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const {
- auto info_it = info_.find(method_ref.dex_file);
+ auto info_it = info_.find(method_ref.dex_file->GetLocation());
if (info_it != info_.end()) {
- uint16_t class_idx = method_ref.dex_file->GetMethodId(method_ref.dex_method_index).class_idx_;
- const ClassToMethodsMap& class_map = info_it->second;
- auto class_it = class_map.find(class_idx);
- if (class_it != class_map.end()) {
- const std::set<uint32_t>& methods = class_it->second;
- return methods.find(method_ref.dex_method_index) != methods.end();
+ if (method_ref.dex_file->GetLocationChecksum() != info_it->second.checksum) {
+ return false;
}
- return false;
+ const std::set<uint16_t>& methods = info_it->second.method_set;
+ return methods.find(method_ref.dex_method_index) != methods.end();
}
return false;
}
-std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const {
+uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
+ uint32_t total = 0;
+ for (const auto& it : info_) {
+ total += it.second.method_set.size();
+ }
+ return total;
+}
+
+std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
+ bool print_full_dex_location) const {
std::ostringstream os;
if (info_.empty()) {
return "ProfileInfo: empty";
@@ -346,17 +339,11 @@ std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const
os << "ProfileInfo:";
- // Use an additional map to achieve a predefined order based on the dex locations.
- SafeMap<const std::string, const DexFile*> dex_locations_map;
- for (auto info_it : info_) {
- dex_locations_map.Put(info_it.first->GetLocation(), info_it.first);
- }
-
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
- for (auto dex_file_it : dex_locations_map) {
+ for (const auto& it : info_) {
os << "\n";
- const std::string& location = dex_file_it.first;
- const DexFile* dex_file = dex_file_it.second;
+ const std::string& location = it.first;
+ const DexFileData& dex_data = it.second;
if (print_full_dex_location) {
os << location;
} else {
@@ -364,10 +351,19 @@ std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const
std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
- for (auto class_it : info_.find(dex_file)->second) {
- for (auto method_it : class_it.second) {
- os << "\n " << PrettyMethod(method_it, *dex_file, true);
+ for (const auto method_it : dex_data.method_set) {
+ if (dex_files != nullptr) {
+ const DexFile* dex_file = nullptr;
+ for (size_t i = 0; i < dex_files->size(); i++) {
+ if (location == (*dex_files)[i]->GetLocation()) {
+ dex_file = (*dex_files)[i];
+ }
+ }
+ if (dex_file != nullptr) {
+ os << "\n " << PrettyMethod(method_it, *dex_file, true);
+ }
}
+ os << "\n " << method_it;
}
}
return os.str();
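As the format comment earlier in this file shows, each serialized profile line is a dex location, its checksum, and the profiled method indices, all comma-separated (e.g. /system/priv-app/app/app.apk,131232145,11,23,454,54). A hedged sketch of reading such a file back through the public API introduced above (the path is illustrative):

    // Sketch: loading and inspecting a saved profile.
    art::ProfileCompilationInfo info;
    if (info.Load("/data/local/tmp/primary.prof")) {  // Parses lines like the above.
      LOG(INFO) << "Profiled methods: " << info.GetNumberOfMethods();
      // Passing nullptr prints raw method indices; pass the dex files to resolve names.
      LOG(INFO) << info.DumpInfo(/* dex_files */ nullptr);
    }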
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 32d4c5bedc..26e1ac385f 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -29,60 +29,50 @@ namespace art {
class ArtMethod;
+// TODO: rename file.
/**
- * Profiling information in a format that can be serialized to disk.
- * It is a serialize-friendly format based on information collected
- * by the interpreter (ProfileInfo).
+ * Profile information in a format suitable to be queried by the compiler when
+ * performing profile-guided compilation.
+ * It is a serialize-friendly format based on information collected by the
+ * interpreter (ProfileInfo).
* Currently it stores only the hot compiled methods.
*/
-class OfflineProfilingInfo {
- public:
- void SaveProfilingInfo(const std::string& filename, const std::vector<ArtMethod*>& methods);
-
- private:
- // Map identifying the location of the profiled methods.
- // dex_file_ -> [dex_method_index]+
- using DexFileToMethodsMap = SafeMap<const DexFile*, std::set<uint32_t>>;
-
- void AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool Serialize(const std::string& filename, const DexFileToMethodsMap& info) const;
-};
-
-/**
- * Profile information in a format suitable to be queried by the compiler and performing
- * profile guided compilation.
- */
class ProfileCompilationInfo {
public:
- // Constructs a ProfileCompilationInfo backed by the provided file.
- explicit ProfileCompilationInfo(const std::string& filename) : filename_(filename) {}
-
- // Loads profile information corresponding to the provided dex files.
- // The dex files' multidex suffixes must be unique.
- // This resets the state of the profiling information
- // (i.e. all previously loaded info are cleared).
- bool Load(const std::vector<const DexFile*>& dex_files);
+ static bool SaveProfilingInfo(const std::string& filename,
+ const std::vector<ArtMethod*>& methods);
+
+ // Loads profile information from the given file.
+ bool Load(const std::string& profile_filename);
+ // Loads the data from another ProfileCompilationInfo object.
+ bool Load(const ProfileCompilationInfo& info);
+ // Saves the profile data to the given file.
+ bool Save(const std::string& profile_filename);
+ // Returns the number of methods that were profiled.
+ uint32_t GetNumberOfMethods() const;
// Returns true if the method reference is present in the profiling info.
bool ContainsMethod(const MethodReference& method_ref) const;
- const std::string& GetFilename() const { return filename_; }
-
// Dumps all the loaded profile info into a string and returns it.
+ // If dex_files is not null then the method indices will be resolved to their
+ // names.
// This is intended for testing and debugging.
- std::string DumpInfo(bool print_full_dex_location = true) const;
+ std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
+ bool print_full_dex_location = true) const;
private:
- bool ProcessLine(const std::string& line,
- const std::vector<const DexFile*>& dex_files);
+ bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
+ bool ProcessLine(const std::string& line);
+
+ struct DexFileData {
+ explicit DexFileData(uint32_t location_checksum) : checksum(location_checksum) {}
+ uint32_t checksum;
+ std::set<uint16_t> method_set;
+ };
- using ClassToMethodsMap = SafeMap<uint32_t, std::set<uint32_t>>;
- // Map identifying the location of the profiled methods.
- // dex_file -> class_index -> [dex_method_index]+
- using DexFileToProfileInfoMap = SafeMap<const DexFile*, ClassToMethodsMap>;
+ using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
- const std::string filename_;
DexFileToProfileInfoMap info_;
};
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 0278138d6e..ec289ea2b5 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -106,10 +106,9 @@ bool ProfileSaver::ProcessProfilingInfo() {
VLOG(profiler) << "Not enough information to save. Nr of methods: " << methods.size();
return false;
}
- offline_profiling_info_.SaveProfilingInfo(output_filename_, methods);
-
- VLOG(profiler) << "Saved profile time: " << PrettyDuration(NanoTime() - start);
+ ProfileCompilationInfo::SaveProfilingInfo(output_filename_, methods);
+ VLOG(profiler) << "Profile process time: " << PrettyDuration(NanoTime() - start);
return true;
}
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 88efd41156..d60142b205 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -66,7 +66,6 @@ class ProfileSaver {
const std::string output_filename_;
jit::JitCodeCache* jit_code_cache_;
const std::set<const std::string> tracked_dex_base_locations_;
- OfflineProfilingInfo offline_profiling_info_;
uint64_t code_cache_last_update_time_ns_;
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index aa25f67bab..1ee1611ef7 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -59,6 +59,7 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
local_ref_cookie(IRT_FIRST_SEGMENT),
locals(kLocalsInitial, kLocalsMax, kLocal, false),
check_jni(false),
+ runtime_deleted(false),
critical(0),
monitors("monitors", kMonitorsInitial, kMonitorsMax) {
functions = unchecked_functions = GetJniNativeInterface();
@@ -67,6 +68,11 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
}
}
+void JNIEnvExt::SetFunctionsToRuntimeShutdownFunctions() {
+ functions = GetRuntimeShutdownNativeInterface();
+ runtime_deleted = true;
+}
+
JNIEnvExt::~JNIEnvExt() {
}
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 2f8decf98f..d4accc342b 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -74,6 +74,9 @@ struct JNIEnvExt : public JNIEnv {
// Frequently-accessed fields cached from JavaVM.
bool check_jni;
+ // If we are a JNI env for a daemon thread with a deleted runtime.
+ bool runtime_deleted;
+
// How many nested "critical" JNI calls are we in?
int critical;
@@ -95,6 +98,9 @@ struct JNIEnvExt : public JNIEnv {
// Check that no monitors are held that have been acquired in this JNI "segment."
void CheckNoHeldMonitors() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Set the functions to the runtime shutdown functions.
+ void SetFunctionsToRuntimeShutdownFunctions();
+
private:
// The constructor should not be called directly. It may leave the object in an erroneous state,
// and the result needs to be checked.
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index cb67ee3b39..c893a0fc9d 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2734,6 +2734,246 @@ const JNINativeInterface* GetJniNativeInterface() {
return &gJniNativeInterface;
}
+void (*gJniSleepForeverStub[])() = {
+ nullptr, // reserved0.
+ nullptr, // reserved1.
+ nullptr, // reserved2.
+ nullptr, // reserved3.
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+ SleepForever,
+};
+
+const JNINativeInterface* GetRuntimeShutdownNativeInterface() {
+ return reinterpret_cast<JNINativeInterface*>(&gJniSleepForeverStub);
+}
+
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
jint method_count) {
ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
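The reinterpret_cast in GetRuntimeShutdownNativeInterface leans on JNINativeInterface being a struct of nothing but function pointers, so a table with the four reserved nullptr slots followed by one SleepForever per JNI function is layout-compatible in practice. A sanity check one might place next to the table (an assumption, not part of this change; it only compiles in jni_internal.cc where gJniSleepForeverStub is visible):

    // Verifies the sleep-forever table has exactly one slot per JNI function.
    static_assert(sizeof(gJniSleepForeverStub) == sizeof(JNINativeInterface),
                  "sleep-forever stub table must cover every JNI function slot");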
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 48b10f5825..3429962d3f 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -30,6 +30,7 @@
namespace art {
const JNINativeInterface* GetJniNativeInterface();
+const JNINativeInterface* GetRuntimeShutdownNativeInterface();
// Similar to RegisterNatives except its passed a descriptor for a class name and failures are
// fatal.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index e133847b06..3571edb277 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -34,14 +34,11 @@
#include "thread-inl.h"
#include "utils.h"
-#define USE_ASHMEM 1
-
-#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
+
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
-#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
@@ -282,7 +279,8 @@ MemMap* MemMap::MapAnonymous(const char* name,
int prot,
bool low_4gb,
bool reuse,
- std::string* error_msg) {
+ std::string* error_msg,
+ bool use_ashmem) {
#ifndef __LP64__
UNUSED(low_4gb);
#endif
@@ -303,17 +301,17 @@ MemMap* MemMap::MapAnonymous(const char* name,
ScopedFd fd(-1);
-#ifdef USE_ASHMEM
-#ifdef __ANDROID__
- const bool use_ashmem = true;
-#else
- // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't
- // fail due to ulimit restrictions. If they will then use a regular mmap.
- struct rlimit rlimit_fsize;
- CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
- const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
- (page_aligned_byte_count < rlimit_fsize.rlim_cur);
-#endif
+ if (use_ashmem) {
+ if (!kIsTargetBuild) {
+ // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't
+ // fail due to ulimit restrictions. If they will then use a regular mmap.
+ struct rlimit rlimit_fsize;
+ CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
+ use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
+ (page_aligned_byte_count < rlimit_fsize.rlim_cur);
+ }
+ }
+
if (use_ashmem) {
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
// prefixed "dalvik-".
@@ -326,7 +324,6 @@ MemMap* MemMap::MapAnonymous(const char* name,
}
flags &= ~MAP_ANONYMOUS;
}
-#endif
// We need to store and potentially set an error number for pretty printing of errors
int saved_errno = 0;
@@ -508,7 +505,7 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
}
MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
- std::string* error_msg) {
+ std::string* error_msg, bool use_ashmem) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -532,23 +529,22 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
DCHECK_ALIGNED(tail_base_size, kPageSize);
-#ifdef USE_ASHMEM
- // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
- // prefixed "dalvik-".
- std::string debug_friendly_name("dalvik-");
- debug_friendly_name += tail_name;
- ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
- int flags = MAP_PRIVATE | MAP_FIXED;
- if (fd.get() == -1) {
- *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
- tail_name, strerror(errno));
- return nullptr;
- }
-#else
- ScopedFd fd(-1);
+ int int_fd = -1;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-#endif
-
+ if (use_ashmem) {
+ // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
+ // prefixed "dalvik-".
+ std::string debug_friendly_name("dalvik-");
+ debug_friendly_name += tail_name;
+ int_fd = ashmem_create_region(debug_friendly_name.c_str(), tail_base_size);
+ flags = MAP_PRIVATE | MAP_FIXED;
+ if (int_fd == -1) {
+ *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
+ tail_name, strerror(errno));
+ return nullptr;
+ }
+ }
+ ScopedFd fd(int_fd);
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Unmap/map the tail region.
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index efce09ae94..ed213659e3 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -57,17 +57,18 @@ class MemMap {
// "reuse" allows re-mapping an address range from an existing mapping.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
- // 'ashmem_name' will be used -- on systems that support it -- to give the mapping
+ // 'name' will be used -- on systems that support it -- to give the mapping
// a name.
//
// On success, returns a MemMap instance. On failure, returns null.
- static MemMap* MapAnonymous(const char* ashmem_name,
+ static MemMap* MapAnonymous(const char* name,
uint8_t* addr,
size_t byte_count,
int prot,
bool low_4gb,
bool reuse,
- std::string* error_msg);
+ std::string* error_msg,
+ bool use_ashmem = true);
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
@@ -168,7 +169,8 @@ class MemMap {
MemMap* RemapAtEnd(uint8_t* new_end,
const char* tail_name,
int tail_prot,
- std::string* error_msg);
+ std::string* error_msg,
+ bool use_ashmem = true);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
REQUIRES(!Locks::mem_maps_lock_);
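
A minimal caller-side sketch of the new defaulted parameter. Everything here other than the MapAnonymous signature above is an illustrative assumption (mapping name, size, error handling):

    std::string error_msg;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("dalvik-scratch",        // name
                                                     nullptr,                 // addr: kernel picks
                                                     4096,                    // byte_count: one page
                                                     PROT_READ | PROT_WRITE,  // prot
                                                     false,                   // low_4gb
                                                     false,                   // reuse
                                                     &error_msg,
                                                     false));                 // use_ashmem: plain mmap
    if (map == nullptr) {
      LOG(ERROR) << "MapAnonymous failed: " << error_msg;
    }
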
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index ef4fe15cc1..53118e07e1 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -616,6 +616,7 @@ inline uint32_t Class::GetAccessFlags() {
<< " IsErroneous=" <<
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
<< " IsString=" << (this == String::GetJavaLangString())
+ << " status= " << GetStatus<kVerifyFlags>()
<< " descriptor=" << PrettyDescriptor(this);
return GetField32<kVerifyFlags>(AccessFlagsOffset());
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 66060f2221..b49fc7494f 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -538,6 +538,71 @@ ArtMethod* Class::FindVirtualMethod(
return nullptr;
}
+ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size) {
+ DCHECK(method->GetDeclaringClass()->IsInterface());
+ DCHECK(IsInterface()) << "Should only be called on an interface class";
+ // Check if we have one defined on this interface first. This includes searching copied ones to
+ // get any conflict methods. Conflict methods are copied into each subtype from the supertype. We
+ // don't do any indirect method checks here.
+ for (ArtMethod& iface_method : GetVirtualMethods(pointer_size)) {
+ if (method->HasSameNameAndSignature(&iface_method)) {
+ return &iface_method;
+ }
+ }
+
+ std::vector<ArtMethod*> abstract_methods;
+ // Search through the IFTable for a working version. We don't need to check for conflicts
+ // because if there was one it would appear in this class's virtual_methods_ above.
+
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
+ MutableHandle<mirror::IfTable> iftable(hs.NewHandle(GetIfTable()));
+ MutableHandle<mirror::Class> iface(hs.NewHandle<mirror::Class>(nullptr));
+ size_t iftable_count = GetIfTableCount();
+ // Find the method. We don't need to check for conflicts because they would have been in the
+ // copied virtuals of this interface. Order matters: traverse in reverse topological order so
+ // that the most-derived interfaces are visited first.
+ for (size_t k = iftable_count; k != 0;) {
+ k--;
+ DCHECK_LT(k, iftable->Count());
+ iface.Assign(iftable->GetInterface(k));
+ // Iterate through every declared method on this interface. Each direct method's name/signature
+ // is unique so the order of the inner loop doesn't matter.
+ for (auto& method_iter : iface->GetDeclaredVirtualMethods(pointer_size)) {
+ ArtMethod* current_method = &method_iter;
+ if (current_method->HasSameNameAndSignature(method)) {
+ if (current_method->IsDefault()) {
+ // Handle JLS soft errors: a default method from one superinterface tree can
+ // "override" abstract methods from other superinterface trees. To handle this,
+ // ignore any default method that is dominated by an abstract method we've seen
+ // so far, i.e. one overridden by an entry in abstract_methods. We do not need to
+ // check for default conflicts because we would hit those before this loop.
+ bool overridden = false;
+ for (ArtMethod* possible_override : abstract_methods) {
+ DCHECK(possible_override->HasSameNameAndSignature(current_method));
+ if (iface->IsAssignableFrom(possible_override->GetDeclaringClass())) {
+ overridden = true;
+ break;
+ }
+ }
+ if (!overridden) {
+ return current_method;
+ }
+ } else {
+ // Not a default method; it may override a default method we find later in the
+ // walk, so stash it for now.
+ abstract_methods.push_back(current_method);
+ }
+ }
+ }
+ }
+ // If we reach here we either never found any declaration of the method (in which case
+ // 'abstract_methods' is empty) or we found no non-overridden default methods (in which case
+ // 'abstract_methods' contains a number of abstract implementations of the method). We choose
+ // one of these arbitrarily.
+ return abstract_methods.empty() ? nullptr : abstract_methods[0];
+}
+
ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
for (ArtMethod& method : GetDirectMethods(pointer_size)) {
if (method.IsClassInitializer()) {
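
The selection rule above is subtle enough to merit a standalone sketch. The following toy model is plain C++, not ART code; every type and name in it is invented for illustration. It walks superinterfaces from most-derived to least-derived, returns the first default method not dominated by a previously seen abstract declaration, and otherwise falls back to an arbitrary abstract one:

    #include <string>
    #include <vector>

    struct Iface;
    struct Method {
      std::string name;
      bool is_default;
      const Iface* owner;  // The interface declaring this method.
    };
    struct Iface {
      std::vector<Method> methods;
      std::vector<const Iface*> supers;
      // True if `other` is this interface or a subtype of it.
      bool IsAssignableFrom(const Iface* other) const {
        if (other == this) return true;
        for (const Iface* s : other->supers) {
          if (IsAssignableFrom(s)) return true;
        }
        return false;
      }
    };

    // `reverse_topo` lists superinterfaces most-derived first, like the IFTable walk.
    const Method* FindForInterfaceSuper(const std::vector<const Iface*>& reverse_topo,
                                        const std::string& name) {
      std::vector<const Method*> abstracts;
      for (const Iface* iface : reverse_topo) {
        for (const Method& m : iface->methods) {
          if (m.name != name) continue;
          if (m.is_default) {
            bool dominated = false;
            for (const Method* a : abstracts) {
              if (iface->IsAssignableFrom(a->owner)) {  // An abstract decl overrides it.
                dominated = true;
                break;
              }
            }
            if (!dominated) return &m;
          } else {
            abstracts.push_back(&m);  // May dominate a default found later.
          }
        }
      }
      return abstracts.empty() ? nullptr : abstracts.front();
    }
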
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 489c269acc..3a06b8240d 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -848,6 +848,11 @@ class MANAGED Class FINAL : public Object {
ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Given a method from some implementor of this interface, return the specific implementation
+ // method for this class.
+ ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
@@ -1058,7 +1063,7 @@ class MANAGED Class FINAL : public Object {
SHARED_REQUIRES(Locks::mutator_lock_);
pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsIdxLoaded() || IsErroneous());
+ DCHECK(IsIdxLoaded() || IsErroneous()) << PrettyClass(this);
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4b24f821cb..da4a891ff5 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -565,8 +565,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
*/
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
- jstring pkg_name,
- jstring app_dir,
+ jstring profile_file,
+ jstring app_dir ATTRIBUTE_UNUSED, // TODO: remove argument
jobjectArray code_paths) {
std::vector<std::string> code_paths_vec;
int code_paths_length = env->GetArrayLength(code_paths);
@@ -577,13 +577,11 @@ static void VMRuntime_registerAppInfo(JNIEnv* env,
env->ReleaseStringUTFChars(code_path, raw_code_path);
}
- const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
- const char* raw_pkg_name = env->GetStringUTFChars(pkg_name, nullptr);
- std::string profile_file = StringPrintf("%s/code_cache/%s.prof", raw_app_dir, raw_pkg_name);
- env->ReleaseStringUTFChars(pkg_name, raw_pkg_name);
- env->ReleaseStringUTFChars(app_dir, raw_app_dir);
+ const char* raw_profile_file = env->GetStringUTFChars(profile_file, nullptr);
+ std::string profile_file_str(raw_profile_file);
+ env->ReleaseStringUTFChars(profile_file, raw_profile_file);
- Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 19774811bc..e89c74da23 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -288,13 +288,6 @@ static jobject Class_getPublicFieldRecursive(JNIEnv* env, jobject javaThis, jstr
GetPublicFieldRecursive(soa.Self(), DecodeClass(soa, javaThis), name_string));
}
-static jobject Class_getDeclaredFieldInternal(JNIEnv* env, jobject javaThis, jstring name) {
- ScopedFastNativeObjectAccess soa(env);
- auto* name_string = soa.Decode<mirror::String*>(name);
- return soa.AddLocalReference<jobject>(
- GetDeclaredField(soa.Self(), DecodeClass(soa, javaThis), name_string));
-}
-
static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) {
ScopedFastNativeObjectAccess soa(env);
auto* name_string = soa.Decode<mirror::String*>(name);
@@ -306,6 +299,12 @@ static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring nam
mirror::Field* result = GetDeclaredField(soa.Self(), klass, name_string);
if (result == nullptr) {
std::string name_str = name_string->ToModifiedUtf8();
+ if (name_str == "value" && klass->IsStringClass()) {
+ // We log the error for this specific case, as the user might just swallow the exception.
+ // This helps diagnose crashes when applications rely on the String#value field being
+ // there.
+ LOG(ERROR) << "The String#value field is not present on Android versions >= 6.0";
+ }
// We may have a pending exception if we failed to resolve.
if (!soa.Self()->IsExceptionPending()) {
ThrowNoSuchFieldException(DecodeClass(soa, javaThis), name_str.c_str());
@@ -723,7 +722,6 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getPublicFieldRecursive, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredMethodInternal,
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8543ff4ef9..d6b08684b9 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -952,7 +952,6 @@ const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
Runtime* runtime = Runtime::Current();
std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
if (!image_spaces.empty()) {
- // TODO: Better support multi-images? b/26317072
cached_image_info_.location = image_spaces[0]->GetImageLocation();
if (isa_ == kRuntimeISA) {
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 9cb37eed58..786cf06e2d 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -221,18 +221,22 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
+ // Find stack map of the catch block.
+ StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+ DCHECK(catch_stack_map.IsValid());
+ DexRegisterMap catch_vreg_map =
+ code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+ if (!catch_vreg_map.IsValid()) {
+ return;
+ }
+
// Find stack map of the throwing instruction.
StackMap throw_stack_map =
code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
DCHECK(throw_stack_map.IsValid());
DexRegisterMap throw_vreg_map =
code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
-
- // Find stack map of the catch block.
- StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
- DCHECK(catch_stack_map.IsValid());
- DexRegisterMap catch_vreg_map =
- code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+ DCHECK(throw_vreg_map.IsValid());
// Copy values between them.
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
@@ -387,6 +391,10 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
number_of_vregs)
: code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ if (!vreg_map.IsValid()) {
+ return;
+ }
+
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
if (updated_vregs != nullptr && updated_vregs[vreg]) {
// Keep the value set by debugger.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index dfb28776f5..c4694ee291 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -211,9 +211,11 @@ Runtime::Runtime()
safe_mode_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
+ interpreter::CheckInterpreterAsmConstants();
}
Runtime::~Runtime() {
+ ATRACE_BEGIN("Runtime shutdown");
if (is_native_bridge_loaded_) {
UnloadNativeBridge();
}
@@ -228,45 +230,55 @@ Runtime::~Runtime() {
Thread* self = Thread::Current();
const bool attach_shutdown_thread = self == nullptr;
if (attach_shutdown_thread) {
+ ATRACE_BEGIN("Attach shutdown thread");
CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
+ ATRACE_END();
self = Thread::Current();
} else {
LOG(WARNING) << "Current thread not detached in Runtime shutdown";
}
{
+ ATRACE_BEGIN("Wait for shutdown cond");
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
shutting_down_started_ = true;
while (threads_being_born_ > 0) {
shutdown_cond_->Wait(self);
}
shutting_down_ = true;
+ ATRACE_END();
}
// Shutdown and wait for the daemons.
CHECK(self != nullptr);
if (IsFinishedStarting()) {
+ ATRACE_BEGIN("Waiting for Daemons");
self->ClearException();
self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
WellKnownClasses::java_lang_Daemons_stop);
+ ATRACE_END();
}
Trace::Shutdown();
if (attach_shutdown_thread) {
+ ATRACE_BEGIN("Detach shutdown thread");
DetachCurrentThread();
+ ATRACE_END();
self = nullptr;
}
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
- if (jit_.get() != nullptr) {
+ if (jit_ != nullptr) {
+ ATRACE_BEGIN("Delete jit");
VLOG(jit) << "Deleting jit thread pool";
// Delete thread pool before the thread list since we don't want to wait forever on the
// JIT compiler threads.
jit_->DeleteThreadPool();
// Similarly, stop the profile saver thread before deleting the thread list.
jit_->StopProfileSaver();
+ ATRACE_END();
}
// Make sure our internal threads are dead before we start tearing down things they're using.
@@ -274,11 +286,13 @@ Runtime::~Runtime() {
delete signal_catcher_;
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
+ ATRACE_BEGIN("Delete thread list");
delete thread_list_;
+ ATRACE_END();
// Delete the JIT after thread list to ensure that there is no remaining threads which could be
// accessing the instrumentation when we delete it.
- if (jit_.get() != nullptr) {
+ if (jit_ != nullptr) {
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
@@ -286,6 +300,7 @@ Runtime::~Runtime() {
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
+ ATRACE_BEGIN("Delete state");
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
@@ -302,10 +317,12 @@ Runtime::~Runtime() {
low_4gb_arena_pool_.reset();
arena_pool_.reset();
MemMap::Shutdown();
+ ATRACE_END();
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
+ ATRACE_END();
}
struct AbortState {
@@ -543,12 +560,9 @@ bool Runtime::Start() {
// Use !IsAotCompiler so that we get test coverage; tests are never the zygote.
if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
- std::vector<gc::space::ImageSpace*> image_spaces = heap_->GetBootImageSpaces();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- ATRACE_BEGIN("AddImageStringsToTable");
- GetInternTable()->AddImageStringsToTable(image_space);
- ATRACE_END();
- }
+ ATRACE_BEGIN("AddImageStringsToTable");
+ GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
+ ATRACE_END();
ATRACE_BEGIN("MoveImageClassesToClassTable");
GetClassLinker()->AddBootImageClassesToClassTable();
ATRACE_END();
@@ -1259,11 +1273,14 @@ void Runtime::InitNativeMethods() {
}
}
{
+ constexpr const char* kOpenJdkLibrary = kIsDebugBuild
+ ? "libopenjdkd.so"
+ : "libopenjdk.so";
std::string error_msg;
- if (!java_vm_->LoadNativeLibrary(env, "libopenjdk.so", nullptr,
+ if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr,
/* is_shared_namespace */ false,
nullptr, nullptr, &error_msg)) {
- LOG(FATAL) << "LoadNativeLibrary failed for \"libopenjdk.so\": " << error_msg;
+ LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
}
}
@@ -1683,13 +1700,29 @@ void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
const std::string& profile_output_filename) {
- VLOG(profiler) << "Register app with " << profile_output_filename_
+ if (jit_.get() == nullptr) {
+ // We are not JITing. Nothing to do.
+ return;
+ }
+
+ VLOG(profiler) << "Register app with " << profile_output_filename
<< " " << Join(code_paths, ':');
- DCHECK(!profile_output_filename.empty());
- profile_output_filename_ = profile_output_filename;
- if (jit_.get() != nullptr && !profile_output_filename.empty() && !code_paths.empty()) {
- jit_->StartProfileSaver(profile_output_filename, code_paths);
+
+ if (profile_output_filename.empty()) {
+ LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
+ return;
}
+ if (!FileExists(profile_output_filename)) {
+ LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
+ return;
+ }
+ if (code_paths.empty()) {
+ LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
+ return;
+ }
+
+ profile_output_filename_ = profile_output_filename;
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
}
// Transaction support.
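
The manual ATRACE_BEGIN/ATRACE_END pairs added above are easy to mismatch (note that the destructor needs a function-level END at the very bottom). A scope-based wrapper would pair them automatically; this is a hypothetical helper sketched for illustration, not something this change introduces:

    class ScopedAtrace {
     public:
      explicit ScopedAtrace(const char* name) { ATRACE_BEGIN(name); }
      ~ScopedAtrace() { ATRACE_END(); }
     private:
      ScopedAtrace(const ScopedAtrace&) = delete;
      ScopedAtrace& operator=(const ScopedAtrace&) = delete;
    };

    // Usage inside Runtime::~Runtime():
    //   { ScopedAtrace trace("Delete thread list"); delete thread_list_; }
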
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 4e62dda8dd..a8b48ee4dc 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -146,7 +146,7 @@ bool operator!=(const SafeMap<K, V, Comparator, Allocator>& lhs,
template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
class AllocationTrackingSafeMap : public SafeMap<
- Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
+ Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>> {
};
} // namespace art
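
The one-word allocator fix above matters because the standard associative containers store their elements as pair<const Key, T>, and the C++11 allocator requirements say the allocator's value_type must match the container's. A compile-time check of that fact:

    #include <map>
    #include <type_traits>
    #include <utility>

    static_assert(std::is_same<std::map<int, long>::value_type,
                               std::pair<const int, long>>::value,
                  "map elements have a const key, so the tracking allocator must too");
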
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 9098d38bb0..5faff93b97 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -322,6 +322,9 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
number_of_dex_registers)
: code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ if (!dex_register_map.IsValid()) {
+ return false;
+ }
DexRegisterLocation::Kind location_kind =
dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
switch (location_kind) {
diff --git a/runtime/stack.h b/runtime/stack.h
index a0c44cb24f..4fa1a4f439 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -184,11 +184,12 @@ class ShadowFrame {
}
uint32_t GetDexPC() const {
- return dex_pc_;
+ return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
}
void SetDexPC(uint32_t dex_pc) {
dex_pc_ = dex_pc;
+ dex_pc_ptr_ = nullptr;
}
ShadowFrame* GetLink() const {
@@ -206,6 +207,20 @@ class ShadowFrame {
return *reinterpret_cast<const int32_t*>(vreg);
}
+ uint32_t* GetVRegAddr(size_t i) {
+ return &vregs_[i];
+ }
+
+ uint32_t* GetShadowRefAddr(size_t i) {
+ DCHECK(HasReferenceArray());
+ DCHECK_LT(i, NumberOfVRegs());
+ return &vregs_[i + NumberOfVRegs()];
+ }
+
+ void SetCodeItem(const DexFile::CodeItem* code_item) {
+ code_item_ = code_item;
+ }
+
float GetVRegFloat(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
// NOTE: Strict-aliasing?
@@ -346,6 +361,10 @@ class ShadowFrame {
return lock_count_data_;
}
+ static size_t LockCountDataOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
+ }
+
static size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
@@ -366,6 +385,18 @@ class ShadowFrame {
return OFFSETOF_MEMBER(ShadowFrame, vregs_);
}
+ static size_t ResultRegisterOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, result_register_);
+ }
+
+ static size_t DexPCPtrOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
+ }
+
+ static size_t CodeItemOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, code_item_);
+ }
+
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
ShadowFrame* link,
@@ -375,10 +406,19 @@ class ShadowFrame {
return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
}
+ uint16_t* GetDexPCPtr() {
+ return dex_pc_ptr_;
+ }
+
+ JValue* GetResultRegister() {
+ return result_register_;
+ }
+
private:
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
- : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
+ : link_(link), method_(method), result_register_(nullptr), dex_pc_ptr_(nullptr),
+ code_item_(nullptr), number_of_vregs_(num_vregs), dex_pc_(dex_pc) {
// TODO(iam): Remove this parameter, it's an artifact of portable removal
DCHECK(has_reference_array);
if (has_reference_array) {
@@ -399,12 +439,15 @@ class ShadowFrame {
const_cast<const ShadowFrame*>(this)->References());
}
- const uint32_t number_of_vregs_;
// Link to previous shadow frame or null.
ShadowFrame* link_;
ArtMethod* method_;
- uint32_t dex_pc_;
+ JValue* result_register_;
+ uint16_t* dex_pc_ptr_;
+ const DexFile::CodeItem* code_item_;
LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
+ const uint32_t number_of_vregs_;
+ uint32_t dex_pc_;
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
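
The rewritten GetDexPC() relies on insns_ being an array of 16-bit code units, so plain pointer subtraction yields the dex pc directly. A self-contained illustration of that arithmetic (local variables standing in for the ShadowFrame fields):

    #include <cstdint>

    int main() {
      uint16_t insns[64] = {};                  // Stand-in for code_item_->insns_.
      const uint16_t* dex_pc_ptr = insns + 42;  // Interpreter's current position.
      // Pointer subtraction counts 16-bit code units, which is exactly the dex pc.
      uint32_t dex_pc = static_cast<uint32_t>(dex_pc_ptr - insns);
      return dex_pc == 42 ? 0 : 1;
    }
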
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index a15a08180e..84185ce49f 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -473,6 +473,9 @@ class DexRegisterLocationCatalog {
class DexRegisterMap {
public:
explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+ DexRegisterMap() {}
+
+ bool IsValid() const { return region_.pointer() != nullptr; }
// Get the surface kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
@@ -1136,11 +1139,14 @@ class CodeInfo {
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
const StackMapEncoding& encoding,
uint32_t number_of_dex_registers) const {
- DCHECK(stack_map.HasDexRegisterMap(encoding));
- uint32_t offset = GetDexRegisterMapsOffset(encoding)
- + stack_map.GetDexRegisterMapOffset(encoding);
- size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ if (!stack_map.HasDexRegisterMap(encoding)) {
+ return DexRegisterMap();
+ } else {
+ uint32_t offset = GetDexRegisterMapsOffset(encoding)
+ + stack_map.GetDexRegisterMapOffset(encoding);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
+ }
}
// Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
@@ -1148,11 +1154,14 @@ class CodeInfo {
InlineInfo inline_info,
const StackMapEncoding& encoding,
uint32_t number_of_dex_registers) const {
- DCHECK(inline_info.HasDexRegisterMapAtDepth(depth));
- uint32_t offset = GetDexRegisterMapsOffset(encoding)
- + inline_info.GetDexRegisterMapOffsetAtDepth(depth);
- size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
+ return DexRegisterMap();
+ } else {
+ uint32_t offset = GetDexRegisterMapsOffset(encoding)
+ + inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
+ }
}
InlineInfo GetInlineInfoOf(StackMap stack_map, const StackMapEncoding& encoding) const {
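
With GetDexRegisterMapOf and GetDexRegisterMapAtDepth now returning a default-constructed (invalid) map instead of DCHECK-failing, every caller follows the same guard, as the stack.cc and quick_exception_handler.cc hunks above show. The caller-side contract, in sketch form:

    DexRegisterMap vreg_map =
        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
    if (!vreg_map.IsValid()) {
      return;  // No dex register info here; bail out instead of crashing on a DCHECK.
    }
    // ... only now is it safe to query vreg_map ...
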
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 13e3774ca0..21241d240b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -76,6 +76,7 @@
#include "verify_object-inl.h"
#include "vmap_table.h"
#include "well_known_classes.h"
+#include "interpreter/interpreter.h"
#if ART_USE_FUTEXES
#include "linux/futex.h"
@@ -686,6 +687,7 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
RemoveSuspendTrigger();
InitCardTable();
InitTid();
+ interpreter::InitInterpreterTls(this);
#ifdef __ANDROID__
__get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
diff --git a/runtime/thread.h b/runtime/thread.h
index 6cb895c771..b25bcb29bf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -601,6 +601,24 @@ class Thread {
}
template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
+ }
+
+ template<size_t pointer_size>
static ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
@@ -1001,6 +1019,30 @@ class Thread {
void ProtectStack();
bool UnprotectStack();
+ void SetMterpDefaultIBase(void* ibase) {
+ tlsPtr_.mterp_default_ibase = ibase;
+ }
+
+ void SetMterpCurrentIBase(void* ibase) {
+ tlsPtr_.mterp_current_ibase = ibase;
+ }
+
+ void SetMterpAltIBase(void* ibase) {
+ tlsPtr_.mterp_alt_ibase = ibase;
+ }
+
+ const void* GetMterpDefaultIBase() const {
+ return tlsPtr_.mterp_default_ibase;
+ }
+
+ const void* GetMterpCurrentIBase() const {
+ return tlsPtr_.mterp_current_ibase;
+ }
+
+ const void* GetMterpAltIBase() const {
+ return tlsPtr_.mterp_alt_ibase;
+ }
+
void NoteSignalBeingHandled() {
if (tls32_.handling_signal_) {
LOG(FATAL) << "Detected signal while processing a signal";
@@ -1246,6 +1288,7 @@ class Thread {
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
+ mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
thread_local_mark_stack(nullptr) {
@@ -1364,6 +1407,11 @@ class Thread {
uint8_t* thread_local_end;
size_t thread_local_objects;
+ // Mterp jump table bases.
+ void* mterp_current_ibase;
+ void* mterp_default_ibase;
+ void* mterp_alt_ibase;
+
// There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
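
The three ibase fields exist so the assembly interpreter (mterp) can switch handler tables per thread, and the typed offset accessors are what keeps the C++ layout and the hand-written assembly in agreement. Purely illustrative, with conventional mterp register names assumed:

    // C++ side: compute the TLS offset the assembly hard-codes.
    constexpr size_t kPtrSize = sizeof(void*);
    ThreadOffset<kPtrSize> ibase_off = Thread::MterpCurrentIBaseOffset<kPtrSize>();
    // Assembly side, conceptually:  ldr rIBASE, [rSELF, #ibase_off]
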
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index a390908635..fc1a4450d3 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -69,6 +69,7 @@ ThreadList::ThreadList()
}
ThreadList::~ThreadList() {
+ ATRACE_BEGIN(__FUNCTION__);
// Detach the current thread if necessary. If we failed to start, there might not be any threads.
// We need to detach the current thread here in case there's another thread waiting to join with
// us.
@@ -79,19 +80,27 @@ ThreadList::~ThreadList() {
contains = Contains(self);
}
if (contains) {
+ ATRACE_BEGIN("DetachCurrentThread");
Runtime::Current()->DetachCurrentThread();
+ ATRACE_END();
}
+ ATRACE_BEGIN("WaitForOtherNonDaemonThreadsToExit");
WaitForOtherNonDaemonThreadsToExit();
+ ATRACE_END();
// Disable GC and wait for GC to complete in case there are still daemon threads doing
// allocations.
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->DisableGCForShutdown();
// In case a GC is in progress, wait for it to finish.
+ ATRACE_BEGIN("WaitForGcToComplete");
heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
-
+ ATRACE_END();
// TODO: there's an unaddressed race here where a thread may attach during shutdown, see
// Thread::Init.
- SuspendAllDaemonThreads();
+ ATRACE_BEGIN("SuspendAllDaemonThreadsForShutdown");
+ SuspendAllDaemonThreadsForShutdown();
+ ATRACE_END();
+ ATRACE_END();
}
bool ThreadList::Contains(Thread* thread) {
@@ -1133,9 +1142,10 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
}
}
-void ThreadList::SuspendAllDaemonThreads() {
+void ThreadList::SuspendAllDaemonThreadsForShutdown() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::thread_list_lock_);
+ size_t daemons_left = 0;
{ // Tell all the daemons it's time to suspend.
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
for (const auto& thread : list_) {
@@ -1144,13 +1154,26 @@ void ThreadList::SuspendAllDaemonThreads() {
CHECK(thread->IsDaemon()) << *thread;
if (thread != self) {
thread->ModifySuspendCount(self, +1, nullptr, false);
+ ++daemons_left;
}
+ // We are shutting down the runtime; set the JNI functions of all the JNIEnvs to be
+ // the sleep-forever ones.
+ thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
}
}
+ // If any daemons are left, wait 200ms to give threads that are not runnable, but are about
+ // to access runtime state, a chance to get past that point before we tear the state down.
+ // Examples: a thread in monitor code, or waking up from a condition variable. TODO: find a
+ // better way to wait for daemon threads to reach a blocked state.
+ if (daemons_left > 0) {
+ static constexpr size_t kDaemonSleepTime = 200 * 1000;
+ usleep(kDaemonSleepTime);
+ }
// Give the threads a chance to suspend, complaining if they're slow.
bool have_complained = false;
- for (int i = 0; i < 10; ++i) {
- usleep(200 * 1000);
+ static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
+ static constexpr size_t kSleepMicroseconds = 1000;
+ for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
bool all_suspended = true;
for (const auto& thread : list_) {
if (thread != self && thread->GetState() == kRunnable) {
@@ -1164,8 +1187,9 @@ void ThreadList::SuspendAllDaemonThreads() {
if (all_suspended) {
return;
}
+ usleep(kSleepMicroseconds);
}
- LOG(ERROR) << "suspend all daemons failed";
+ LOG(WARNING) << "timed out suspending all daemon threads";
}
void ThreadList::Register(Thread* self) {
DCHECK_EQ(self, Thread::Current());
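
The rewritten wait polls in 1 ms steps for up to 2 s (kTimeoutMicroseconds / kSleepMicroseconds = 2000 iterations) instead of ten fixed 200 ms sleeps, so a fast shutdown no longer pays a mandatory delay. The generic shape of that loop, as a hedged sketch:

    #include <cstddef>
    #include <unistd.h>

    // Poll `done` every `step_us` microseconds, for at most `timeout_us` total.
    template <typename Predicate>
    bool WaitFor(Predicate done, size_t timeout_us, size_t step_us) {
      for (size_t waited = 0; waited < timeout_us; waited += step_us) {
        if (done()) {
          return true;
        }
        usleep(step_us);
      }
      return done();  // One last check at the deadline.
    }
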
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 07ea10dbea..2e73f6af7f 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -164,7 +164,7 @@ class ThreadList {
void DumpUnattachedThreads(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_);
- void SuspendAllDaemonThreads()
+ void SuspendAllDaemonThreadsForShutdown()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
void WaitForOtherNonDaemonThreadsToExit()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index ff6b4c0d20..8e9f12b7a0 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1446,6 +1446,11 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
return true;
}
+bool FileExists(const std::string& filename) {
+ struct stat buffer;
+ return stat(filename.c_str(), &buffer) == 0;
+}
+
std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
@@ -1866,4 +1871,10 @@ int64_t GetFileSizeBytes(const std::string& filename) {
return rc == 0 ? stat_buf.st_size : -1;
}
+void SleepForever() {
+ while (true) {
+ usleep(1000000);
+ }
+}
+
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index a07e74c62b..153749eff4 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -99,6 +99,18 @@ static inline bool NeedsEscaping(uint16_t ch) {
return (ch < ' ' || ch > '~');
}
+template <typename T> T SafeAbs(T value) {
+ // std::abs has undefined behavior for the most negative value of a signed type.
+ DCHECK_NE(value, std::numeric_limits<T>::min());
+ return std::abs(value);
+}
+
+template <typename T> T AbsOrMin(T value) {
+ return (value == std::numeric_limits<T>::min())
+ ? value
+ : std::abs(value);
+}
+
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
@@ -276,6 +288,9 @@ std::string GetSystemImageFilename(const char* location, InstructionSet isa);
// Wrapper on fork/execv to run a command in a subprocess.
bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
+// Returns true if the file exists.
+bool FileExists(const std::string& filename);
+
class VoidFunctor {
public:
template <typename A>
@@ -370,6 +385,9 @@ T GetRandomNumber(T min, T max) {
// Return the file size in bytes or -1 if the file does not exist.
int64_t GetFileSizeBytes(const std::string& filename);
+// Sleep forever and never come back.
+NO_RETURN void SleepForever();
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
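
The two helpers differ only at the most negative value, where std::abs overflows: SafeAbs asserts that value never appears, while AbsOrMin passes it through unchanged. A small usage sketch, assuming utils.h is included and we are inside namespace art:

    #include <cstdint>
    #include <limits>

    int main() {
      int32_t min = std::numeric_limits<int32_t>::min();
      int32_t a = AbsOrMin(-5);   // 5
      int32_t b = AbsOrMin(min);  // min itself: no overflow, no undefined behavior
      int32_t c = SafeAbs(-5);    // 5; SafeAbs(min) would trip the DCHECK instead
      return (a == 5 && b == min && c == 5) ? 0 : 1;
    }
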
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index d75587b52c..1c95648cfe 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3639,8 +3639,9 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
return *common_super;
}
+// TODO: Consider adding a METHOD_SUPER to MethodType.
ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
- uint32_t dex_method_idx, MethodType method_type) {
+ uint32_t dex_method_idx, MethodType method_type, bool is_super) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
@@ -3667,6 +3668,8 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
res_method = klass->FindDirectMethod(name, signature, pointer_size);
} else if (method_type == METHOD_INTERFACE) {
res_method = klass->FindInterfaceMethod(name, signature, pointer_size);
+ } else if (is_super && klass->IsInterface()) {
+ res_method = klass->FindInterfaceMethod(name, signature, pointer_size);
} else {
res_method = klass->FindVirtualMethod(name, signature, pointer_size);
}
@@ -3939,7 +3942,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs(
// we're making.
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
+ ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type, is_super);
if (res_method == nullptr) { // error or class is unresolved
// Check what we can statically.
if (!have_pending_hard_failure_) {
@@ -3949,24 +3952,32 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs(
}
// If we're using invoke-super(method), make sure that the executing method's class' superclass
- // has a vtable entry for the target method.
+ // has a vtable entry for the target method, or that the target is on an interface.
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
- const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
- if (super.IsUnresolvedTypes()) {
- Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
- << PrettyMethod(dex_method_idx_, *dex_file_)
- << " to super " << PrettyMethod(res_method);
- return nullptr;
- }
- mirror::Class* super_klass = super.GetClass();
- if (res_method->GetMethodIndex() >= super_klass->GetVTableLength()) {
- Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from "
- << PrettyMethod(dex_method_idx_, *dex_file_)
- << " to super " << super
- << "." << res_method->GetName()
- << res_method->GetSignature();
- return nullptr;
+ if (res_method->GetDeclaringClass()->IsInterface()) {
+ // TODO: Fill in this part; for now, verify what we can.
+ if (Runtime::Current()->IsAotCompiler()) {
+ Fail(VERIFY_ERROR_FORCE_INTERPRETER) << "Currently we only allow invoke-super in "
+ << "interpreter when using interface methods";
+ }
+ } else {
+ const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+ if (super.IsUnresolvedTypes()) {
+ Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
+ << PrettyMethod(dex_method_idx_, *dex_file_)
+ << " to super " << PrettyMethod(res_method);
+ return nullptr;
+ }
+ mirror::Class* super_klass = super.GetClass();
+ if (res_method->GetMethodIndex() >= super_klass->GetVTableLength()) {
+ Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from "
+ << PrettyMethod(dex_method_idx_, *dex_file_)
+ << " to super " << super
+ << "." << res_method->GetName()
+ << res_method->GetSignature();
+ return nullptr;
+ }
}
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 79db576993..ec0a8f9262 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -654,7 +654,7 @@ class MethodVerifier {
* the referrer can access the resolved method.
* Does not throw exceptions.
*/
- ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
+ ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type, bool is_super)
SHARED_REQUIRES(Locks::mutator_lock_);
/*
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 80b751ca0f..7c7981e5e7 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -246,28 +246,18 @@ class RegType {
}
/*
- * A basic Join operation on classes. For a pair of types S and T the Join,
- *written S v T = J, is
- * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is
- *J is the parent of
- * S and T such that there isn't a parent of both S and T that isn't also the
- *parent of J (ie J
+ * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
+ * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
+ * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
* is the deepest (lowest upper bound) parent of S and T).
*
- * This operation applies for regular classes and arrays, however, for
- *interface types there
- * needn't be a partial ordering on the types. We could solve the problem of a
- *lack of a partial
- * order by introducing sets of types, however, the only operation permissible
- *on an interface is
- * invoke-interface. In the tradition of Java verifiers [1] we defer the
- *verification of interface
- * types until an invoke-interface call on the interface typed reference at
- *runtime and allow
- * the perversion of Object being assignable to an interface type (note,
- *however, that we don't
- * allow assignment of Object or Interface to any concrete class and are
- *therefore type safe).
+ * This operation applies for regular classes and arrays, however, for interface types there
+ * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
+ * order by introducing sets of types, however, the only operation permissible on an interface is
+ * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
+ * types until an invoke-interface call on the interface typed reference at runtime and allow
+ * the perversion of Object being assignable to an interface type (note, however, that we don't
+ * allow assignment of Object or Interface to any concrete class and are therefore type safe).
*
* [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
*/
diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
new file mode 100644
index 0000000000..54879fbad9
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "base/casts.h"
+#include "base/macros.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace {
+
+static volatile std::atomic<bool> vm_was_shutdown(false);
+
+extern "C" JNIEXPORT void JNICALL Java_Main_waitAndCallIntoJniEnv(JNIEnv* env, jclass) {
+ // Wait until the runtime is shut down.
+ while (!vm_was_shutdown.load()) {
+ usleep(1000);
+ }
+ std::cout << "About to call exception check\n";
+ env->ExceptionCheck();
+ LOG(ERROR) << "Should not be reached!";
+}
+
+// NO_RETURN does not work with extern "C" for target builds.
+extern "C" JNIEXPORT void JNICALL Java_Main_destroyJavaVMAndExit(JNIEnv* env, jclass) {
+ // Fake up the managed stack so we can detach.
+ Thread* const self = Thread::Current();
+ self->SetTopOfStack(nullptr);
+ self->SetTopOfShadowStack(nullptr);
+ JavaVM* vm = down_cast<JNIEnvExt*>(env)->vm;
+ vm->DetachCurrentThread();
+ vm->DestroyJavaVM();
+ vm_was_shutdown.store(true);
+ // Give threads some time to get stuck in ExceptionCheck.
+ usleep(1000000);
+ if (env != nullptr) {
+ // Use env != nullptr to trick noreturn.
+ exit(0);
+ }
+}
+
+} // namespace
+} // namespace art
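
What the test exercises, in short: after DestroyJavaVM, every JNIEnv function pointer has been redirected (SetFunctionsToRuntimeShutdownFunctions in the thread_list.cc hunk above) to a function that never returns, so a late daemon call such as env->ExceptionCheck() parks instead of touching freed runtime state. A toy stand-in for such a stub, not the runtime's actual table:

    #include <unistd.h>

    extern "C" void SleepForeverStub() {
      while (true) {
        usleep(1000000);  // Same idea as SleepForever() added in runtime/utils.cc above.
      }
    }
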
diff --git a/test/136-daemon-jni-shutdown/expected.txt b/test/136-daemon-jni-shutdown/expected.txt
new file mode 100644
index 0000000000..f0b6353e9f
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/expected.txt
@@ -0,0 +1,5 @@
+JNI_OnLoad called
+About to call exception check
+About to call exception check
+About to call exception check
+About to call exception check
diff --git a/test/136-daemon-jni-shutdown/info.txt b/test/136-daemon-jni-shutdown/info.txt
new file mode 100644
index 0000000000..06a12dff9e
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/info.txt
@@ -0,0 +1 @@
+Test that daemon threads that call into a JNI env after the runtime is shut down do not crash. \ No newline at end of file
diff --git a/test/136-daemon-jni-shutdown/src/Main.java b/test/136-daemon-jni-shutdown/src/Main.java
new file mode 100644
index 0000000000..6eceb757b1
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test that daemon threads that call into a JNI env after the runtime is shut down do not crash.
+ */
+public class Main {
+
+ public final static int THREAD_COUNT = 4;
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ Thread t = new Thread(new DaemonRunnable());
+ t.setDaemon(true);
+ t.start();
+ }
+ // Give threads time to start and become stuck in waitAndCallIntoJniEnv.
+ Thread.sleep(1000);
+ destroyJavaVMAndExit();
+ }
+
+ static native void waitAndCallIntoJniEnv();
+ static native void destroyJavaVMAndExit();
+
+ private static class DaemonRunnable implements Runnable {
+ public void run() {
+ for (;;) {
+ waitAndCallIntoJniEnv();
+ }
+ }
+ }
+}
diff --git a/test/137-cfi/run b/test/137-cfi/run
index ecbbbc7c48..9c567b6813 100755
--- a/test/137-cfi/run
+++ b/test/137-cfi/run
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-exec ${RUN} "$@"
+exec ${RUN} "$@" -Xcompiler-option --generate-debug-info
diff --git a/test/143-string-value/check b/test/143-string-value/check
new file mode 100755
index 0000000000..cdf7b783a3
--- /dev/null
+++ b/test/143-string-value/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip run-specific numbers (pid and line number)
+sed -e 's/^art E[ ]\+[0-9]\+[ ]\+[0-9]\+ art\/runtime\/native\/java_lang_Class.cc:[0-9]\+\] //' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/143-string-value/expected.txt b/test/143-string-value/expected.txt
new file mode 100644
index 0000000000..06cdb89e90
--- /dev/null
+++ b/test/143-string-value/expected.txt
@@ -0,0 +1 @@
+The String#value field is not present on Android versions >= 6.0
diff --git a/test/143-string-value/info.txt b/test/143-string-value/info.txt
new file mode 100644
index 0000000000..61ec816ab8
--- /dev/null
+++ b/test/143-string-value/info.txt
@@ -0,0 +1,2 @@
+Test to ensure we emit an error message when being asked
+for String#value.
diff --git a/test/143-string-value/src/Main.java b/test/143-string-value/src/Main.java
new file mode 100644
index 0000000000..e97069200a
--- /dev/null
+++ b/test/143-string-value/src/Main.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ String.class.getDeclaredField("value");
+ throw new Error("Expected to fail");
+ } catch (ReflectiveOperationException e) {
+ // Ignore...
+ }
+ }
+}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 43bc9d06a2..0e07f47288 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -120,9 +120,10 @@ public class Main {
/// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
/// CHECK-DAG: <<Const6:i\d+>> IntConstant 6
+ /// CHECK-DAG: <<Const11:i\d+>> IntConstant 11
/// CHECK-DAG: <<Add1:i\d+>> Add [<<Const1>>,<<Const2>>]
- /// CHECK-DAG: <<Add2:i\d+>> Add [<<Const5>>,<<Const6>>]
- /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]
+ /// CHECK-DAG: Add [<<Const5>>,<<Const6>>]
+ /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Const11>>]
/// CHECK-DAG: Return [<<Add3>>]
/// CHECK-START: int Main.IntAddition2() constant_folding (after)
@@ -522,7 +523,7 @@ public class Main {
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
- /// CHECK-DAG: <<And:j\d+>> And [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<And>>]
/// CHECK-START: long Main.AndLongInt() constant_folding (after)
@@ -567,7 +568,7 @@ public class Main {
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
- /// CHECK-DAG: <<Or:j\d+>> Or [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Or>>]
/// CHECK-START: long Main.OrLongInt() constant_folding (after)
@@ -612,7 +613,7 @@ public class Main {
/// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>]
- /// CHECK-DAG: <<Xor:j\d+>> Xor [<<Const10L>>,<<TypeConv>>]
+ /// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const10L>>]
/// CHECK-DAG: Return [<<Xor>>]
/// CHECK-START: long Main.XorLongInt() constant_folding (after)
@@ -749,7 +750,7 @@ public class Main {
/// CHECK-START: long Main.Mul0(long) constant_folding (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
- /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const0>>]
+ /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const0>>,<<Arg>>]
/// CHECK-DAG: Return [<<Mul>>]
/// CHECK-START: long Main.Mul0(long) constant_folding (after)
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 6151fc10f2..0fd7801d48 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -288,7 +288,7 @@ public class Main {
/// CHECK-START: long Main.Mul1(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const1:j\d+>> LongConstant 1
- /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const1>>]
+ /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const1>>,<<Arg>>]
/// CHECK-DAG: Return [<<Mul>>]
/// CHECK-START: long Main.Mul1(long) instruction_simplifier (after)
@@ -323,7 +323,7 @@ public class Main {
/// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const128:j\d+>> LongConstant 128
- /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const128>>]
+ /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const128>>,<<Arg>>]
/// CHECK-DAG: Return [<<Mul>>]
/// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (after)
@@ -705,7 +705,7 @@ public class Main {
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg>>]
/// CHECK-DAG: <<Neg2:i\d+>> Neg [<<Neg1>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Neg2>>,<<Neg1>>]
/// CHECK-DAG: Return [<<Add>>]
/// CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after)
@@ -841,13 +841,13 @@ public class Main {
/// CHECK-DAG: <<ConstF1:i\d+>> IntConstant -1
/// CHECK-DAG: <<Xor1:i\d+>> Xor [<<Arg>>,<<ConstF1>>]
/// CHECK-DAG: <<Xor2:i\d+>> Xor [<<Xor1>>,<<ConstF1>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Xor1>>,<<Xor2>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Xor2>>,<<Xor1>>]
/// CHECK-DAG: Return [<<Add>>]
/// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Arg>>,<<Not>>]
/// CHECK-DAG: Return [<<Add>>]
/// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
@@ -1005,7 +1005,7 @@ public class Main {
/// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Const0>>,<<Arg>>]
+ /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>]
/// CHECK-DAG: If [<<Cond>>]
/// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after)
@@ -1064,7 +1064,7 @@ public class Main {
/// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const0>>,<<Arg>>]
+ /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>]
/// CHECK-DAG: If [<<Cond>>]
/// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after)
@@ -1234,7 +1234,7 @@ public class Main {
/// CHECK-START: long Main.mulPow2Minus1(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const31:j\d+>> LongConstant 31
- /// CHECK: Mul [<<Arg>>,<<Const31>>]
+ /// CHECK: Mul [<<Const31>>,<<Arg>>]
/// CHECK-START: long Main.mulPow2Minus1(long) instruction_simplifier (after)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 6b4da9de27..d0b33b9282 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -163,8 +163,8 @@ public class Main {
/// CHECK: <<Arg:z\d+>> StaticFieldGet liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>]
/// CHECK: If [<<Arg>>] liveness:<<IfLiv:\d+>>
/// CHECK: Goto liveness:<<GotoLiv1:\d+>>
- /// CHECK: Goto liveness:<<GotoLiv2:\d+>>
/// CHECK: Exit
+ /// CHECK: Goto liveness:<<GotoLiv2:\d+>>
/// CHECK-EVAL: <<IfLiv>> + 1 == <<ArgUse>>
/// CHECK-EVAL: <<GotoLiv1>> < <<GotoLiv2>>
/// CHECK-EVAL: <<GotoLiv1>> + 2 == <<ArgLoopUse>>
diff --git a/test/529-checker-unresolved/expected.txt b/test/529-checker-unresolved/expected.txt
index 1e7dbfed2e..1590a2a280 100644
--- a/test/529-checker-unresolved/expected.txt
+++ b/test/529-checker-unresolved/expected.txt
@@ -5,3 +5,6 @@ UnresolvedClass.interfaceMethod()
UnresolvedClass.superMethod()
instanceof ok
checkcast ok
+UnresolvedClass.directCall()
+UnresolvedClass.directCall()
+UnresolvedClass.directCall()
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5219c04c37..872fa6d0dd 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -138,6 +138,27 @@ public class Main extends UnresolvedSuperClass {
callUnresolvedInstanceFieldAccess(c);
testInstanceOf(m);
testCheckCast(m);
+ testLicm(2);
+ }
+
+ /// CHECK-START: void Main.testLicm(int) licm (before)
+ /// CHECK: <<Class:l\d+>> LoadClass loop:B2
+ /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2
+ /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
+
+ /// CHECK-START: void Main.testLicm(int) licm (after)
+ /// CHECK: <<Class:l\d+>> LoadClass loop:none
+ /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none
+ /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
+ static public void testLicm(int count) {
+ // Test to make sure we keep the initialization check after loading an unresolved class.
+ UnresolvedClass c;
+ int i = 0;
+ do {
+ c = new UnresolvedClass();
+ } while (i++ != count);
}
public static void expectEquals(byte expected, byte result) {
diff --git a/test/561-divrem/expected.txt b/test/561-divrem/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/561-divrem/expected.txt
diff --git a/test/561-divrem/info.txt b/test/561-divrem/info.txt
new file mode 100644
index 0000000000..71c9601574
--- /dev/null
+++ b/test/561-divrem/info.txt
@@ -0,0 +1,2 @@
+Regression test for div/rem taking Integer.MIN_VALUE and
+Long.MIN_VALUE.
diff --git a/test/561-divrem/src/Main.java b/test/561-divrem/src/Main.java
new file mode 100644
index 0000000000..082783df2d
--- /dev/null
+++ b/test/561-divrem/src/Main.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ public static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ public static void main(String[] args) {
+ assertEquals(0, $noinline$divInt(1));
+ assertEquals(1, $noinline$remInt(1));
+
+ assertEquals(0, $noinline$divInt(-1));
+ assertEquals(-1, $noinline$remInt(-1));
+
+ assertEquals(0, $noinline$divInt(0));
+ assertEquals(0, $noinline$remInt(0));
+
+ assertEquals(1, $noinline$divInt(Integer.MIN_VALUE));
+ assertEquals(0, $noinline$remInt(Integer.MIN_VALUE));
+
+ assertEquals(0, $noinline$divInt(Integer.MAX_VALUE));
+ assertEquals(Integer.MAX_VALUE, $noinline$remInt(Integer.MAX_VALUE));
+
+ assertEquals(0, $noinline$divInt(Integer.MAX_VALUE - 1));
+ assertEquals(Integer.MAX_VALUE - 1, $noinline$remInt(Integer.MAX_VALUE - 1));
+
+ assertEquals(0, $noinline$divInt(Integer.MIN_VALUE + 1));
+ assertEquals(Integer.MIN_VALUE + 1, $noinline$remInt(Integer.MIN_VALUE + 1));
+
+ assertEquals(0L, $noinline$divLong(1L));
+ assertEquals(1L, $noinline$remLong(1L));
+
+ assertEquals(0L, $noinline$divLong(-1L));
+ assertEquals(-1L, $noinline$remLong(-1L));
+
+ assertEquals(0L, $noinline$divLong(0L));
+ assertEquals(0L, $noinline$remLong(0L));
+
+ assertEquals(1L, $noinline$divLong(Long.MIN_VALUE));
+ assertEquals(0L, $noinline$remLong(Long.MIN_VALUE));
+
+ assertEquals(0L, $noinline$divLong(Long.MAX_VALUE));
+ assertEquals(Long.MAX_VALUE, $noinline$remLong(Long.MAX_VALUE));
+
+ assertEquals(0L, $noinline$divLong(Long.MAX_VALUE - 1));
+ assertEquals(Long.MAX_VALUE - 1, $noinline$remLong(Long.MAX_VALUE - 1));
+
+    assertEquals(0L, $noinline$divLong(Long.MIN_VALUE + 1));
+ assertEquals(Long.MIN_VALUE + 1, $noinline$remLong(Long.MIN_VALUE + 1));
+ }
+
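+  // Java integer division truncates toward zero, so value / Integer.MIN_VALUE
+  // is 0 for every input except Integer.MIN_VALUE itself (where it is 1), and
+  // value % Integer.MIN_VALUE is value itself except at Integer.MIN_VALUE
+  // (where it is 0). The long variants behave the same with Long.MIN_VALUE.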
+ public static int $noinline$divInt(int value) {
+ if (doThrow) {
+ throw new Error("");
+ }
+ return value / Integer.MIN_VALUE;
+ }
+
+ public static int $noinline$remInt(int value) {
+ if (doThrow) {
+ throw new Error("");
+ }
+ return value % Integer.MIN_VALUE;
+ }
+
+ public static long $noinline$divLong(long value) {
+ if (doThrow) {
+ throw new Error("");
+ }
+ return value / Long.MIN_VALUE;
+ }
+
+ public static long $noinline$remLong(long value) {
+ if (doThrow) {
+ throw new Error("");
+ }
+ return value % Long.MIN_VALUE;
+ }
+
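+  // The $noinline$ prefix is an ART test-suite convention for methods that
+  // must not be inlined; the always-false doThrow guard below adds a
+  // conditional throw as a further safeguard against inlining.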
+ static boolean doThrow = false;
+}
diff --git a/test/561-shared-slowpaths/expected.txt b/test/561-shared-slowpaths/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/561-shared-slowpaths/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/561-shared-slowpaths/info.txt b/test/561-shared-slowpaths/info.txt
new file mode 100644
index 0000000000..c51e70b452
--- /dev/null
+++ b/test/561-shared-slowpaths/info.txt
@@ -0,0 +1 @@
+Test on correctness while possibly sharing slow paths.
diff --git a/test/561-shared-slowpaths/src/Main.java b/test/561-shared-slowpaths/src/Main.java
new file mode 100644
index 0000000000..718b8750a7
--- /dev/null
+++ b/test/561-shared-slowpaths/src/Main.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on correctness in situations where slow paths may be shared
+// (actual sharing may vary between different code generators).
+//
+public class Main {
+
+  // A method with two loops that can be optimized with dynamic BCE,
+  // resulting, for each loop, in a deopt on null, a deopt on lower OOB,
+  // and a deopt on upper OOB.
+  private static void init(int[] x, int[] y, int l1, int h1, int l2, int h2) {
+ for (int i = l1; i < h1; i++) {
+ x[i] = i;
+ }
+ for (int i = l2; i < h2; i++) {
+ y[i] = i;
+ }
+ }
+
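+  // Dynamic BCE hoists each loop's null check and lower/upper bounds checks
+  // into Deoptimize guards ahead of the loop; a failing guard falls back to
+  // the interpreter, which then throws the NPE or AIOOBE expected below.
+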
+  // Test that each of the six possible exception situations for init()
+ // are correctly handled by the deopt instructions.
+ public static void main(String[] args) {
+ int[] x = new int[100];
+ int[] y = new int[100];
+ int z;
+
+ // All is well.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 100);
+ } catch (Exception e) {
+ z = 1;
+ }
+ expectEquals(z, 0);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ // Null deopt on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(null, y, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, -1, 100, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 101, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Null deopt on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, null, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, -1, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 101);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void reset(int[] x, int[] y) {
+ for (int i = 0; i < x.length; i++) x[i] = 0;
+ for (int i = 0; i < y.length; i++) y[i] = 0;
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/562-bce-preheader/expected.txt b/test/562-bce-preheader/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/562-bce-preheader/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/562-bce-preheader/info.txt b/test/562-bce-preheader/info.txt
new file mode 100644
index 0000000000..ae006aca47
--- /dev/null
+++ b/test/562-bce-preheader/info.txt
@@ -0,0 +1 @@
+Regression test for correct placement of hoisting/deopting code.
diff --git a/test/562-bce-preheader/src/Main.java b/test/562-bce-preheader/src/Main.java
new file mode 100644
index 0000000000..8de0533591
--- /dev/null
+++ b/test/562-bce-preheader/src/Main.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /**
+ * Method with an outer countable loop and an inner do-while loop.
+ * Since all work is done in the header of the inner loop, any invariant hoisting
+ * and deopting should be done in its proper loop preheader, not the true-block
+ * of the newly generated taken-test after dynamic BCE.
+ */
+ public static int doit(int[][] x, int j) {
+ float f = 0;
+ int acc = 0;
+ for (int i = 0; i < 2; i++) {
+ // The full body of a do-while loop is the loop header.
+ do {
+ // Some "noise" to avoid hoisting the array reference
+ // before the dynamic BCE phase runs.
+ f++;
+ // The invariant array reference with corresponding bounds check
+ // is a candidate for hoisting when dynamic BCE runs. If it is
+ // not moved to the proper loop preheader, the wrong values
+ // cause the test to fail.
+ acc += x[i][i];
+ } while (++j < i);
+ }
+ return acc;
+ }
+
+ /**
+ * Single countable loop with a clear header and a loop body. In this case,
+ * after dynamic bce, some invariant hoisting and deopting must go to the
+ * proper loop preheader and some must go to the true-block.
+ */
+ public static int foo(int[] x, int[] y, int n) {
+ float f = 0;
+ int acc = 0;
+ int i = 0;
+ while (true) {
+ // This part is the loop header.
+ // Some "noise" to avoid hoisting the array reference
+ // before the dynamic BCE phase runs.
+ f++;
+ // The invariant array reference with corresponding bounds check
+ // is a candidate for hoisting when dynamic BCE runs. If it is
+ // not moved to the proper loop preheader, the wrong values
+ // cause the test to fail.
+ acc += y[0];
+ if (++i > n)
+ break;
+ // From here on, this part is the loop body.
+ // The unit-stride array reference is a candidate for dynamic BCE.
+ // The deopting appears in the true-block.
+ acc += x[i];
+ }
+ return acc;
+ }
+
+ public static void main(String args[]) {
+ int[][] x = new int[2][2];
+ int y;
+
+ x[0][0] = 1;
+ x[1][1] = 2;
+
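+    // For example, doit(x, -6): at i=0 the inner do-while body executes six
+    // times (j runs -6..-1), adding x[0][0]=1 each time; at i=1 it executes
+    // once, adding x[1][1]=2, for a total of 8.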
+ expectEquals(8, doit(x, -6));
+ expectEquals(7, doit(x, -5));
+ expectEquals(6, doit(x, -4));
+ expectEquals(5, doit(x, -3));
+ expectEquals(4, doit(x, -2));
+ expectEquals(3, doit(x, -1));
+ expectEquals(3, doit(x, 0));
+ expectEquals(3, doit(x, 1));
+ expectEquals(3, doit(x, 22));
+
+ int a[] = { 1, 2, 3, 5 };
+ int b[] = { 7 };
+
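+    // For example, foo(a, b, 1): the loop header adds y[0]=7 twice and the
+    // body adds x[1]=2 once before i exceeds n, giving 7 + 2 + 7 = 16.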
+ expectEquals(7, foo(a, b, -1));
+ expectEquals(7, foo(a, b, 0));
+ expectEquals(16, foo(a, b, 1));
+ expectEquals(26, foo(a, b, 2));
+ expectEquals(38, foo(a, b, 3));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/562-no-intermediate/expected.txt b/test/562-no-intermediate/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/562-no-intermediate/expected.txt
diff --git a/test/562-no-intermediate/info.txt b/test/562-no-intermediate/info.txt
new file mode 100644
index 0000000000..4f21aebd03
--- /dev/null
+++ b/test/562-no-intermediate/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, checking that no
+IntermediateAddress instruction remains live across a Java call.
diff --git a/test/562-no-intermediate/src/Main.java b/test/562-no-intermediate/src/Main.java
new file mode 100644
index 0000000000..3b74d6f269
--- /dev/null
+++ b/test/562-no-intermediate/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  /// CHECK-START-ARM64: void Main.main(String[]) register_allocator (after)
+ /// CHECK-NOT: IntermediateAddress
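+  // IntermediateAddress is an ARM64 instruction that folds the array base
+  // plus data offset into one value; it must not stay live across the
+  // Math.cos() call, since a moving GC at the call could relocate the array.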
+ public static void main(String[] args) {
+ array[index] += Math.cos(42);
+ }
+
+ static int index = 0;
+ static double[] array = new double[2];
+}
diff --git a/test/701-easy-div-rem/genMain.py b/test/701-easy-div-rem/genMain.py
index 75eee17fe6..b6c769fd55 100644
--- a/test/701-easy-div-rem/genMain.py
+++ b/test/701-easy-div-rem/genMain.py
@@ -13,25 +13,27 @@
# limitations under the License.
upper_bound_int_pow2 = 31
+upper_bound_int_pow2_neg = 32
upper_bound_long_pow2 = 63
+upper_bound_long_pow2_neg = 64
upper_bound_constant = 100
all_tests = [
({'@INT@': 'int', '@SUFFIX@':''},
[('CheckDiv', 'idiv_by_pow2_', [2**i for i in range(upper_bound_int_pow2)]),
- ('CheckDiv', 'idiv_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckDiv', 'idiv_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2_neg)]),
('CheckDiv', 'idiv_by_constant_', [i for i in range(1, upper_bound_constant)]),
('CheckDiv', 'idiv_by_constant_neg_', [-i for i in range(1, upper_bound_constant)]),
('CheckRem', 'irem_by_pow2_', [2**i for i in range(upper_bound_int_pow2)]),
- ('CheckRem', 'irem_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckRem', 'irem_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2_neg)]),
('CheckRem', 'irem_by_constant_', [i for i in range(1, upper_bound_constant)]),
('CheckRem', 'irem_by_constant_neg_', [-i for i in range(1, upper_bound_constant)])]),
({'@INT@': 'long', '@SUFFIX@': 'l'},
[('CheckDiv', 'ldiv_by_pow2_', [2**i for i in range(upper_bound_long_pow2)]),
- ('CheckDiv', 'ldiv_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckDiv', 'ldiv_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2_neg)]),
('CheckDiv', 'ldiv_by_constant_', [i for i in range(1, upper_bound_constant)]),
('CheckDiv', 'ldiv_by_constant_neg_', [-i for i in range(1, upper_bound_constant)]),
('CheckRem', 'lrem_by_pow2_', [2**i for i in range(upper_bound_long_pow2)]),
- ('CheckRem', 'lrem_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckRem', 'lrem_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2_neg)]),
('CheckRem', 'lrem_by_constant_', [i for i in range(1, upper_bound_constant)]),
('CheckRem', 'lrem_by_constant_neg_', [-i for i in range(1, upper_bound_constant)])])
]
diff --git a/test/969-iface-super/build b/test/969-iface-super/build
new file mode 100755
index 0000000000..b1ef320d05
--- /dev/null
+++ b/test/969-iface-super/build
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# Decide whether to compile with Java source code. By default we will use Smali.
+USES_JAVA_SOURCE="false"
+if [[ $@ == *"--jvm"* ]]; then
+ USES_JAVA_SOURCE="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+ if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+ USES_JAVA_SOURCE="true"
+ else
+ echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+ fi
+fi
+
+# Generate the Main.smali file, or fail.
+${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
+
+if [[ "$USES_JAVA_SOURCE" == "true" ]]; then
+ # We are compiling java code, create it.
+ mkdir -p src
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ # Ignore the smali directory.
+ EXTRA_ARGS="--no-smali"
+fi
+
+./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
diff --git a/test/969-iface-super/expected.txt b/test/969-iface-super/expected.txt
new file mode 100644
index 0000000000..f310d10e12
--- /dev/null
+++ b/test/969-iface-super/expected.txt
@@ -0,0 +1,47 @@
+Testing for type A
+A-virtual A.SayHi()='Hello '
+A-interface iface.SayHi()='Hello '
+End testing for type A
+Testing for type B
+B-virtual B.SayHi()='Hello Hello '
+B-interface iface.SayHi()='Hello Hello '
+B-interface iface2.SayHi()='Hello Hello '
+End testing for type B
+Testing for type C
+C-virtual C.SayHi()='Hello and welcome '
+C-interface iface.SayHi()='Hello and welcome '
+End testing for type C
+Testing for type D
+D-virtual D.SayHi()='Hello Hello and welcome '
+D-interface iface.SayHi()='Hello Hello and welcome '
+D-interface iface2.SayHi()='Hello Hello and welcome '
+End testing for type D
+Testing for type E
+E-virtual E.SayHi()='Hello there!'
+E-interface iface.SayHi()='Hello there!'
+E-interface iface3.SayHi()='Hello there!'
+End testing for type E
+Testing for type F
+F-virtual E.SayHi()='Hello there!'
+F-virtual F.SayHi()='Hello there!'
+F-interface iface.SayHi()='Hello there!'
+F-interface iface3.SayHi()='Hello there!'
+F-virtual F.SaySurprisedHi()='Hello there!!'
+End testing for type F
+Testing for type G
+G-virtual E.SayHi()='Hello there!?'
+G-virtual F.SayHi()='Hello there!?'
+G-virtual G.SayHi()='Hello there!?'
+G-interface iface.SayHi()='Hello there!?'
+G-interface iface3.SayHi()='Hello there!?'
+G-virtual F.SaySurprisedHi()='Hello there!!'
+G-virtual G.SaySurprisedHi()='Hello there!!'
+G-virtual G.SayVerySurprisedHi()='Hello there!!!'
+End testing for type G
+Testing for type H
+H-virtual H.SayConfusedHi()='Hello ?!'
+H-virtual A.SayHi()='Hello ?'
+H-virtual H.SayHi()='Hello ?'
+H-interface iface.SayHi()='Hello ?'
+H-virtual H.SaySurprisedHi()='Hello !'
+End testing for type H
diff --git a/test/969-iface-super/info.txt b/test/969-iface-super/info.txt
new file mode 100644
index 0000000000..c0555d2afc
--- /dev/null
+++ b/test/969-iface-super/info.txt
@@ -0,0 +1,6 @@
+Smali-based tests for experimental interface default methods.
+
+This tests invoke-super with default methods.
+
+To run with --jvm you must export JAVA_HOME to point at a Java 8 installation
+and pass the --use-java-home option to run-test.
diff --git a/test/969-iface-super/run b/test/969-iface-super/run
new file mode 100755
index 0000000000..8944ea92d3
--- /dev/null
+++ b/test/969-iface-super/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/969-iface-super/smali/A.smali b/test/969-iface-super/smali/A.smali
new file mode 100644
index 0000000000..e7760a1062
--- /dev/null
+++ b/test/969-iface-super/smali/A.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class A implements iface {
+# }
+
+.class public LA;
+.super Ljava/lang/Object;
+.implements Liface;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/969-iface-super/smali/B.smali b/test/969-iface-super/smali/B.smali
new file mode 100644
index 0000000000..e529d0534f
--- /dev/null
+++ b/test/969-iface-super/smali/B.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class B implements iface2 {
+# }
+
+.class public LB;
+.super Ljava/lang/Object;
+.implements Liface2;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/969-iface-super/smali/C.smali b/test/969-iface-super/smali/C.smali
new file mode 100644
index 0000000000..6fbb0c4b0e
--- /dev/null
+++ b/test/969-iface-super/smali/C.smali
@@ -0,0 +1,41 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class C implements iface {
+# public String SayHi() {
+# return iface.super.SayHi() + " and welcome ";
+# }
+# }
+
+.class public LC;
+.super Ljava/lang/Object;
+.implements Liface;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, Liface;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, " and welcome "
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/D.smali b/test/969-iface-super/smali/D.smali
new file mode 100644
index 0000000000..ecd4629584
--- /dev/null
+++ b/test/969-iface-super/smali/D.smali
@@ -0,0 +1,41 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class D implements iface2 {
+# public String SayHi() {
+# return iface2.super.SayHi() + " and welcome ";
+# }
+# }
+
+.class public LD;
+.super Ljava/lang/Object;
+.implements Liface2;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, Liface2;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, " and welcome "
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/E.smali b/test/969-iface-super/smali/E.smali
new file mode 100644
index 0000000000..558aaea5d6
--- /dev/null
+++ b/test/969-iface-super/smali/E.smali
@@ -0,0 +1,41 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class E implements iface3 {
+# public String SayHi() {
+# return iface3.super.SayHi() + " there!";
+# }
+# }
+
+.class public LE;
+.super Ljava/lang/Object;
+.implements Liface3;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, Liface3;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, " there!"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/F.smali b/test/969-iface-super/smali/F.smali
new file mode 100644
index 0000000000..c402d5cdc9
--- /dev/null
+++ b/test/969-iface-super/smali/F.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class F extends E {
+# public String SaySurprisedHi() {
+# return super.SayHi() + "!";
+# }
+# }
+
+.class public LF;
+.super LE;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LE;-><init>()V
+ return-void
+.end method
+
+.method public SaySurprisedHi()Ljava/lang/String;
+ .registers 2
+ invoke-super {p0}, LE;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "!"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/G.smali b/test/969-iface-super/smali/G.smali
new file mode 100644
index 0000000000..45705e6d86
--- /dev/null
+++ b/test/969-iface-super/smali/G.smali
@@ -0,0 +1,53 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class G extends F {
+# public String SayHi() {
+# return super.SayHi() + "?";
+# }
+# public String SayVerySurprisedHi() {
+# return super.SaySurprisedHi() + "!";
+# }
+# }
+
+.class public LG;
+.super LF;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LF;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, LF;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "?"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public SayVerySurprisedHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, LF;->SaySurprisedHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "!"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/H.smali b/test/969-iface-super/smali/H.smali
new file mode 100644
index 0000000000..12f246b8c3
--- /dev/null
+++ b/test/969-iface-super/smali/H.smali
@@ -0,0 +1,66 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class H extends A {
+# public String SayHi() {
+# return super.SayHi() + "?";
+# }
+# public String SaySurprisedHi() {
+# return super.SayHi() + "!";
+# }
+# public String SayConfusedHi() {
+# return SayHi() + "!";
+# }
+# }
+
+.class public LH;
+.super LA;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, LA;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "?"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public SaySurprisedHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, LA;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "!"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public SayConfusedHi()Ljava/lang/String;
+ .locals 2
+ invoke-virtual {p0}, LH;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ const-string v1, "!"
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/classes.xml b/test/969-iface-super/smali/classes.xml
new file mode 100644
index 0000000000..4d205bd606
--- /dev/null
+++ b/test/969-iface-super/smali/classes.xml
@@ -0,0 +1,99 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<data>
+ <classes>
+ <class name="A" super="java/lang/Object">
+ <implements>
+ <item>iface</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="B" super="java/lang/Object">
+ <implements>
+ <item>iface2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="C" super="java/lang/Object">
+ <implements>
+ <item>iface</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="D" super="java/lang/Object">
+ <implements>
+ <item>iface2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="E" super="java/lang/Object">
+ <implements>
+ <item>iface3</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="F" super="E">
+ <implements> </implements>
+ <methods>
+ <item>SaySurprisedHi</item>
+ </methods>
+ </class>
+
+ <class name="G" super="F">
+ <implements> </implements>
+ <methods>
+ <item>SayVerySurprisedHi</item>
+ </methods>
+ </class>
+
+ <class name="H" super="A">
+ <implements> </implements>
+ <methods>
+ <item>SaySurprisedHi</item>
+ <item>SayConfusedHi</item>
+ </methods>
+ </class>
+ </classes>
+
+ <interfaces>
+ <interface name="iface" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <item>SayHi</item>
+ </methods>
+ </interface>
+
+ <interface name="iface2" super="java/lang/Object">
+ <implements>
+ <item>iface</item>
+ </implements>
+ <methods> </methods>
+ </interface>
+
+ <interface name="iface3" super="java/lang/Object">
+ <implements>
+ <item>iface</item>
+ </implements>
+ <methods> </methods>
+ </interface>
+ </interfaces>
+</data>
diff --git a/test/969-iface-super/smali/iface.smali b/test/969-iface-super/smali/iface.smali
new file mode 100644
index 0000000000..08bb93dd0c
--- /dev/null
+++ b/test/969-iface-super/smali/iface.smali
@@ -0,0 +1,30 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface {
+# public default String SayHi() {
+# return "Hello ";
+# }
+# }
+
+.class public abstract interface Liface;
+.super Ljava/lang/Object;
+
+.method public SayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hello "
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/iface2.smali b/test/969-iface-super/smali/iface2.smali
new file mode 100644
index 0000000000..ce6f86432d
--- /dev/null
+++ b/test/969-iface-super/smali/iface2.smali
@@ -0,0 +1,36 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface2 extends iface {
+# public default String SayHi() {
+# return iface.super.SayHi() + iface.super.SayHi();
+# }
+# }
+
+.class public abstract interface Liface2;
+.super Ljava/lang/Object;
+.implements Liface;
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ invoke-super {p0}, Liface;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-super {p0}, Liface;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/969-iface-super/smali/iface3.smali b/test/969-iface-super/smali/iface3.smali
new file mode 100644
index 0000000000..bf200364ec
--- /dev/null
+++ b/test/969-iface-super/smali/iface3.smali
@@ -0,0 +1,22 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface3 extends iface {
+# }
+
+.class public abstract interface Liface3;
+.super Ljava/lang/Object;
+.implements Liface;
diff --git a/test/970-iface-super-resolution-generated/build b/test/970-iface-super-resolution-generated/build
new file mode 100755
index 0000000000..2d9830b970
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/build
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=$(ulimit -S)
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# Decide whether to compile with Java source code. By default we will use Smali.
+USES_JAVA_SOURCE="false"
+if [[ $@ == *"--jvm"* ]]; then
+ USES_JAVA_SOURCE="true"
+elif [[ "$USE_JACK" == "true" ]]; then
+ if $JACK -D jack.java.source.version=1.8 2>/dev/null; then
+ USES_JAVA_SOURCE="true"
+ else
+ echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2
+ fi
+fi
+
+if [[ "$USES_JAVA_SOURCE" == "true" ]]; then
+ # Build the Java files
+ mkdir -p src
+ mkdir -p src2
+ ./util-src/generate_java.py ./src2 ./src ./expected.txt
+else
+ # Generate the smali files and expected.txt or fail
+ mkdir -p smali
+ ./util-src/generate_smali.py ./smali ./expected.txt
+fi
+
+./default-build "$@" --experimental default-methods
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/970-iface-super-resolution-generated/expected.txt b/test/970-iface-super-resolution-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/970-iface-super-resolution-generated/info.txt b/test/970-iface-super-resolution-generated/info.txt
new file mode 100644
index 0000000000..2cd2cc75b7
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for experimental interface default methods.
+
+This tests that interface method resolution order is correct.
+
+Obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm, we will use the
+$(ANDROID_BUILD_TOP)/art/tools/extract-embedded-java script to turn the smali
+into equivalent Java using the embedded Java code.
+
+Care should be taken when updating the generate_smali.py script. It should
+always produce the same output when run multiple times, and the expected
+output it generates must remain valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/970-iface-super-resolution-generated/run b/test/970-iface-super-resolution-generated/run
new file mode 100755
index 0000000000..6d2930d463
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/970-iface-super-resolution-generated/util-src/generate_java.py b/test/970-iface-super-resolution-generated/util-src/generate_java.py
new file mode 100755
index 0000000000..c12f10d790
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/util-src/generate_java.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Java test files for test 970.
+"""
+
+import generate_smali as base
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+import testgen.mixins as mixins
+
+class JavaConverter(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin):
+ """
+ A class that can convert a SmaliFile to a JavaFile.
+ """
+ def __init__(self, inner):
+ self.inner = inner
+
+ def get_name(self):
+ return self.inner.get_name()
+
+ def __str__(self):
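+    # The smali sources embed the equivalent Java as '#' comments; keeping
+    # only those lines and stripping the leading '#' recovers the Java code.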
+ out = ""
+ for line in str(self.inner).splitlines(keepends = True):
+ if line.startswith("#"):
+ out += line[1:]
+ return out
+
+def main(argv):
+ final_java_dir = Path(argv[1])
+ if not final_java_dir.exists() or not final_java_dir.is_dir():
+ print("{} is not a valid java dir".format(final_java_dir), file=sys.stderr)
+ sys.exit(1)
+ initial_java_dir = Path(argv[2])
+ if not initial_java_dir.exists() or not initial_java_dir.is_dir():
+ print("{} is not a valid java dir".format(initial_java_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[3])
+ mainclass, all_files = base.create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ if f.initial_build_different():
+ JavaConverter(f).dump(final_java_dir)
+ JavaConverter(f.get_initial_build_version()).dump(initial_java_dir)
+ else:
+ JavaConverter(f).dump(initial_java_dir)
+ if isinstance(f, base.TestInterface):
+ JavaConverter(f).dump(final_java_dir)
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/970-iface-super-resolution-generated/util-src/generate_smali.py b/test/970-iface-super-resolution-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..cb7b0fa4f2
--- /dev/null
+++ b/test/970-iface-super-resolution-generated/util-src/generate_smali.py
@@ -0,0 +1,614 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 970.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the tree can have.
+MAX_IFACE_DEPTH = 3
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def initial_build_different(self):
+ return False
+
+ def get_name(self):
+ """
+ Gets the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the smali code for this test.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ test_groups = test_groups,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# try {{
+# {farg} v = new {farg}();
+# System.out.println("Testing {tree}");
+# v.testAll();
+# System.out.println("Success: testing {tree}");
+# return;
+# }} catch (Exception e) {{
+# System.out.println("Failure: testing {tree}");
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ :call_{fname}_try_start
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ new-instance v6, L{farg};
+ invoke-direct {{v6}}, L{farg};-><init>()V
+
+ const-string v3, "Testing {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {{v6}}, L{farg};->testAll()V
+
+ const-string v3, "Success: testing {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Exception; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v4, "Failure: testing {tree}"
+    invoke-virtual {{v2, v4}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ OUTPUT_FORMAT = """
+Testing {tree}
+{test_output}
+Success: testing {tree}
+""".strip()
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def __str__(self):
+ """
+ Print the smali code for this test function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname = self.get_name(),
+ farg = self.farg.get_name(),
+ tree = self.farg.get_tree())
+
+ def get_name(self):
+ """
+ Gets the name of this test function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def get_expected(self):
+ """
+ Get the expected output of this function.
+ """
+ return self.OUTPUT_FORMAT.format(
+ tree = self.farg.get_tree(),
+ test_output = self.farg.get_test_output().strip())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test interface initialization order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public class {class_name} implements {ifaces} {{
+#
+# public {class_name}() {{
+# }}
+.method public constructor <init>()V
+ .locals 2
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+# public String getCalledInterface() {{
+# throw new Error("Should not be called");
+# }}
+.method public getCalledInterface()Ljava/lang/String;
+ .locals 2
+ const-string v0, "Should not be called"
+ new-instance v1, Ljava/lang/Error;
+ invoke-direct {{v1, v0}}, Ljava/lang/Error;-><init>(Ljava/lang/String;)V
+ throw v1
+.end method
+
+# public void testAll() {{
+# boolean failed = false;
+# Error exceptions = new Error("Test failures");
+.method public testAll()V
+ .locals 5
+ const/4 v0, 0
+ const-string v1, "Test failures"
+ new-instance v2, Ljava/lang/Error;
+ invoke-direct {{v2, v1}}, Ljava/lang/Error;-><init>(Ljava/lang/String;)V
+
+ {test_calls}
+
+# if (failed) {{
+ if-eqz v0, :end
+# throw exceptions;
+ throw v2
+ :end
+# }}
+ return-void
+# }}
+.end method
+
+{test_funcs}
+
+# }}
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ TEST_CALL_TEMPLATE = """
+# try {{
+# test_{iface}_super();
+# }} catch (Throwable t) {{
+# exceptions.addSuppressed(t);
+# failed = true;
+# }}
+ :try_{iface}_start
+ invoke-virtual {{p0}}, L{class_name};->test_{iface}_super()V
+ goto :error_{iface}_end
+ :try_{iface}_end
+ .catch Ljava/lang/Throwable; {{:try_{iface}_start .. :try_{iface}_end}} :error_{iface}_start
+ :error_{iface}_start
+ move-exception v3
+ invoke-virtual {{v2, v3}}, Ljava/lang/Throwable;->addSuppressed(Ljava/lang/Throwable;)V
+ const/4 v0, 1
+ :error_{iface}_end
+"""
+
+ TEST_FUNC_TEMPLATE = """
+# public void test_{iface}_super() {{
+# try {{
+# System.out.println("{class_name} -> {iface}.super.getCalledInterface(): " +
+# {iface}.super.getCalledInterface());
+# }} catch (NoSuchMethodError e) {{
+# System.out.println("{class_name} -> {iface}.super.getCalledInterface(): NoSuchMethodError");
+# }} catch (IncompatibleClassChangeError e) {{
+# System.out.println("{class_name} -> {iface}.super.getCalledInterface(): IncompatibleClassChangeError");
+# }} catch (Throwable t) {{
+# System.out.println("{class_name} -> {iface}.super.getCalledInterface(): Unknown error occurred");
+# throw t;
+# }}
+# }}
+.method public test_{iface}_super()V
+ .locals 3
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ :try_start
+ const-string v1, "{class_name} -> {iface}.super.getCalledInterface(): "
+ invoke-super {{p0}}, L{iface};->getCalledInterface()Ljava/lang/String;
+ move-result-object v2
+
+ invoke-virtual {{v1, v2}}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v1
+
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+ :try_end
+ .catch Ljava/lang/NoSuchMethodError; {{:try_start .. :try_end}} :AME_catch
+ .catch Ljava/lang/IncompatibleClassChangeError; {{:try_start .. :try_end}} :ICCE_catch
+ .catch Ljava/lang/Throwable; {{:try_start .. :try_end}} :throwable_catch
+ :AME_catch
+ const-string v1, "{class_name} -> {iface}.super.getCalledInterface(): NoSuchMethodError"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+ :ICCE_catch
+ const-string v1, "{class_name} -> {iface}.super.getCalledInterface(): IncompatibleClassChangeError"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+ :throwable_catch
+ move-exception v2
+ const-string v1, "{class_name} -> {iface}.super.getCalledInterface(): Unknown error occurred"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ throw v2
+.end method
+""".strip()
+
+ OUTPUT_TEMPLATE = "{class_name} -> {iface}.super.getCalledInterface(): {result}"
+
+ def __init__(self, ifaces, name = None):
+ """
+ Initialize this test class which implements the given interfaces
+ """
+ self.ifaces = ifaces
+ if name is None:
+ self.class_name = "CLASS_"+gensym()
+ else:
+ self.class_name = name
+
+ def get_initial_build_version(self):
+ """
+ Returns a version of this class that can be used for the initial build (meaning no compiler
+ checks will be triggered).
+ """
+ return TestClass([i.get_initial_build_version() for i in self.ifaces], self.class_name)
+
+ def initial_build_different(self):
+ return False
+
+ def get_name(self):
+ """
+ Gets the name of this interface
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{fname} {iftree}]".format(fname = self.get_name(), iftree = print_tree(self.ifaces))
+
+ def get_test_output(self):
+ return '\n'.join(map(lambda a: self.OUTPUT_TEMPLATE.format(class_name = self.get_name(),
+ iface = a.get_name(),
+ result = a.get_output()),
+ self.ifaces))
+
+ def __str__(self):
+ """
+ Print the smali code for this class.
+ """
+ funcs = '\n'.join(map(lambda a: self.TEST_FUNC_TEMPLATE.format(iface = a.get_name(),
+ class_name = self.get_name()),
+ self.ifaces))
+ calls = '\n'.join(map(lambda a: self.TEST_CALL_TEMPLATE.format(iface = a.get_name(),
+ class_name = self.get_name()),
+ self.ifaces))
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ ifaces = j_ifaces,
+ class_name = self.class_name,
+ test_funcs = funcs,
+ test_calls = calls)
+
+class IncompatibleClassChangeErrorResult(mixins.Named):
+ def get_name(self):
+ return "IncompatibleClassChangeError"
+
+ICCE = IncompatibleClassChangeErrorResult()
+
+class NoSuchMethodErrorResult(mixins.Named):
+ def get_name(self):
+ return "NoSuchMethodError"
+
+NSME = NoSuchMethodErrorResult()
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+
+{funcs}
+
+# }}
+"""
+
+ ABSTRACT_FUNC_TEMPLATE = """
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default String getCalledInterface() {{
+# return "{class_name}";
+# }}
+.method public getCalledInterface()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{class_name}"
+ return-object v0
+.end method
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ def __init__(self, ifaces, default, name = None):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = ifaces
+ self.default = default
+ if name is None:
+ end = "_DEFAULT" if default else ""
+ self.class_name = "INTERFACE_"+gensym()+end
+ else:
+ self.class_name = name
+
+ def get_initial_build_version(self):
+ """
+ Returns a version of this class that can be used for the initial build (meaning no compiler
+ checks will be triggered).
+ """
+ return TestInterface([i.get_initial_build_version() for i in self.ifaces],
+ True,
+ self.class_name)
+
+ def initial_build_different(self):
+ return not self.default
+
+ def get_name(self):
+ """
+ Gets the name of this interface
+ """
+ return self.class_name
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def get_called(self):
+ """
+    Get the interface whose default method would be called when invoking
+    getCalledInterface().
+ """
+ all_ifaces = set(iface for iface in self if iface.default)
+ for i in all_ifaces:
+ if all(map(lambda j: i not in j.get_super_types(), all_ifaces)):
+ return i
+ return ICCE if any(map(lambda i: i.default, all_ifaces)) else NSME
+
+ def get_super_types(self):
+ """
+ Returns a set of all the supertypes of this interface
+ """
+ return set(i2 for i2 in self)
+
+ def get_output(self):
+ if self.default:
+ return self.get_name()
+ else:
+ return self.get_called().get_name()
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+  def __str__(self):
+ """
+ Print the smali code for this interface.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ if self.default:
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(class_name = self.class_name)
+ else:
+ funcs = self.ABSTRACT_FUNC_TEMPLATE
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ class_name = self.class_name)
+
+def dump_tree(ifaces):
+ """
+ Yields all the interfaces transitively implemented by the set in
+ reverse-depth-first order
+ """
+ for i in ifaces:
+ yield from dump_tree(i.ifaces)
+ yield i
+
+def print_tree(ifaces):
+ """
+ Prints the tree for the given ifaces.
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+# Cached output of subtree_sizes for speed of access.
+SUBTREES = [set(tuple(sorted(l)) for l in subtree_sizes(i)) for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_test_classes():
+ """
+ Yield all the test classes with the different interface trees
+ """
+ for num in range(1, MAX_IFACE_DEPTH + 1):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestClass(supers)
+
+def create_interface_trees(num):
+ """
+ Yield all the interface trees up to 'num' depth.
+ """
+ if num == 0:
+ yield TestInterface(tuple(), False)
+ yield TestInterface(tuple(), True)
+ return
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestInterface(supers, False)
+ yield TestInterface(supers, True)
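+      # Also directly re-implement an interface that is already inherited
+      # indirectly, creating diamond-shaped hierarchies.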
+ for selected in (set(dump_tree(supers)) - set(supers)):
+ yield TestInterface(tuple([selected] + list(supers)), True)
+ yield TestInterface(tuple([selected] + list(supers)), False)
+ # TODO Should add on some from higher up the tree.
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for clazz in create_test_classes():
+ classes.add(clazz)
+ for i in dump_tree(clazz.ifaces):
+ classes.add(i)
+ mc.add_test(clazz)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/971-iface-super-partial-compile-generated/build b/test/971-iface-super-partial-compile-generated/build
new file mode 100755
index 0000000000..1e9f8aadd5
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/build
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
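+# The ERR trap restores the limit on failure (set -e aborts the script); the
+# explicit restore_ulimit call at the end handles the success path.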
+
+# TODO: Support running with jack.
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p classes
+ mkdir -p src
+ echo "${JAVAC} \$@" >> ./javac_exec.sh
+  # This will use javac_exec.sh to execute the javac compiler. It will place the
+ # compiled class files in ./classes and the expected values in expected.txt
+ #
+ # After this the src directory will contain the final versions of all files.
+ ./util-src/generate_java.py ./javac_exec.sh ./src ./classes ./expected.txt ./build_log
+else
+ mkdir -p ./smali
+ # Generate the smali files and expected.txt or fail
+ ./util-src/generate_smali.py ./smali ./expected.txt
+ # Use the default build script
+ ./default-build "$@" "$EXTRA_ARGS" --experimental default-methods
+fi
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/971-iface-super-partial-compile-generated/expected.txt b/test/971-iface-super-partial-compile-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/971-iface-super-partial-compile-generated/info.txt b/test/971-iface-super-partial-compile-generated/info.txt
new file mode 100644
index 0000000000..bc1c42816e
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for experimental interface default methods.
+
+This tests that interface method resolution order is correct in the presence of
+partial compilation/illegal invokes.
+
+This test needs to run under ART or a Java 8 language runtime and compiler.
+
+When run, smali test files are generated by the util-src/generate_smali.py
+script. If run with --jvm, the util-src/generate_java.py script will generate
+equivalent java code based on the smali code.
+
+Care should be taken when updating the generate_smali.py script. It should
+always produce equivalent output when run multiple times, and the expected
+output should be valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
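+
+For reference, the build script invokes the generator as
+
+  ./util-src/generate_smali.py ./smali ./expected.txt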
diff --git a/test/971-iface-super-partial-compile-generated/run b/test/971-iface-super-partial-compile-generated/run
new file mode 100755
index 0000000000..6d2930d463
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" --experimental default-methods
diff --git a/test/971-iface-super-partial-compile-generated/util-src/generate_java.py b/test/971-iface-super-partial-compile-generated/util-src/generate_java.py
new file mode 100755
index 0000000000..99b0479c8f
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/util-src/generate_java.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Java test files for test 971.
+"""
+
+import generate_smali as base
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+import testgen.mixins as mixins
+import functools
+import operator
+import subprocess
+
+class JavaConverter(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin):
+ """
+ A class that can convert a SmaliFile to a JavaFile.
+ """
+ def __init__(self, inner):
+ self.inner = inner
+
+ def get_name(self):
+ """Gets the name of this file."""
+ return self.inner.get_name()
+
+ def __str__(self):
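+    # The smali templates carry the equivalent Java source in '#' comments;
+    # keeping only those lines (minus the leading '#') recovers compilable Java.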
+ out = ""
+ for line in str(self.inner).splitlines(keepends = True):
+ if line.startswith("#"):
+ out += line[1:]
+ return out
+
+class Compiler:
+ def __init__(self, sources, javac, temp_dir, classes_dir):
+ self.javac = javac
+ self.temp_dir = temp_dir
+ self.classes_dir = classes_dir
+ self.sources = sources
+
+ def compile_files(self, args, files):
+ """
+ Compile the files given with the arguments given.
+ """
+ args = args.split()
+ files = list(map(str, files))
+ cmd = ['sh', '-a', '-e', '--', str(self.javac)] + args + files
+ print("Running compile command: {}".format(cmd))
+ subprocess.check_call(cmd)
+ print("Compiled {} files".format(len(files)))
+
+ def execute(self):
+ """
+ Compiles this test, doing partial compilation as necessary.
+ """
+ # Compile Main and all classes first. Force all interfaces to be default so that there will be
+ # no compiler problems (works since classes only implement 1 interface).
+ for f in self.sources:
+ if isinstance(f, base.TestInterface):
+ JavaConverter(f.get_specific_version(base.InterfaceType.default)).dump(self.temp_dir)
+ else:
+ JavaConverter(f).dump(self.temp_dir)
+ self.compile_files("-d {}".format(self.classes_dir), self.temp_dir.glob("*.java"))
+
+ # Now we compile the interfaces
+ ifaces = set(i for i in self.sources if isinstance(i, base.TestInterface))
+ filters = (lambda a: a.is_default(), lambda a: not a.is_default())
+ converters = (lambda a: JavaConverter(a.get_specific_version(base.InterfaceType.default)),
+ lambda a: JavaConverter(a.get_specific_version(base.InterfaceType.empty)))
+ while len(ifaces) != 0:
+ for iface_filter, iface_converter in zip(filters, converters):
+ # Find those ifaces where there are no (uncompiled) interfaces that are subtypes.
+ tops = set(filter(lambda a: iface_filter(a) and not any(map(lambda i: a in i.get_super_types(), ifaces)), ifaces))
+ files = []
+ # Dump these ones, they are getting compiled.
+ for f in tops:
+ out = JavaConverter(f)
+ out.dump(self.temp_dir)
+ files.append(self.temp_dir / out.get_file_name())
+ # Force all superinterfaces of these to be empty so there will be no conflicts
+ overrides = functools.reduce(operator.or_, map(lambda i: i.get_super_types(), tops), set())
+ for overridden in overrides:
+ out = iface_converter(overridden)
+ out.dump(self.temp_dir)
+ files.append(self.temp_dir / out.get_file_name())
+ self.compile_files("-d {outdir} -cp {outdir}".format(outdir = self.classes_dir), files)
+ # Remove these from the set of interfaces to be compiled.
+ ifaces -= tops
+ print("Finished compiling all files.")
+ return
+
+def main(argv):
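+  # argv[1..4]: javac wrapper script, source/temp dir, classes dir and
+  # expected.txt. Extra arguments (such as the build log path passed by
+  # ./build) are ignored.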
+ javac_exec = Path(argv[1])
+ if not javac_exec.exists() or not javac_exec.is_file():
+ print("{} is not a shell script".format(javac_exec), file=sys.stderr)
+ sys.exit(1)
+ temp_dir = Path(argv[2])
+ if not temp_dir.exists() or not temp_dir.is_dir():
+ print("{} is not a valid source dir".format(temp_dir), file=sys.stderr)
+ sys.exit(1)
+ classes_dir = Path(argv[3])
+ if not classes_dir.exists() or not classes_dir.is_dir():
+ print("{} is not a valid classes directory".format(classes_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[4])
+ mainclass, all_files = base.create_all_test_files()
+
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ print("Wrote expected output")
+
+ Compiler(all_files, javac_exec, temp_dir, classes_dir).execute()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/971-iface-super-partial-compile-generated/util-src/generate_smali.py b/test/971-iface-super-partial-compile-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..f01c9043b9
--- /dev/null
+++ b/test/971-iface-super-partial-compile-generated/util-src/generate_smali.py
@@ -0,0 +1,689 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 971.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from enum import Enum
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the type tree can have.
+MAX_IFACE_DEPTH = 3
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_funcs}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 0
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the MainClass smali code.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_funcs = ""
+ for t in all_tests:
+ test_funcs += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("smali"),
+ test_funcs = test_funcs,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# {farg} v = null;
+# try {{
+# v = new {farg}();
+# }} catch (Throwable e) {{
+#     System.out.println("Unexpected error occurred while creating {farg} instance");
+# e.printStackTrace(System.out);
+# return;
+# }}
+# try {{
+# v.callSupers();
+# return;
+# }} catch (Throwable e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ :new_{fname}_try_start
+ new-instance v0, L{farg};
+ invoke-direct {{v0}}, L{farg};-><init>()V
+ goto :call_{fname}_try_start
+ :new_{fname}_try_end
+ .catch Ljava/lang/Throwable; {{:new_{fname}_try_start .. :new_{fname}_try_end}} :new_error_{fname}_start
+ :new_error_{fname}_start
+ move-exception v6
+  const-string v5, "Unexpected error occurred while creating {farg} instance"
+ invoke-virtual {{v4,v5}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-virtual {{v6,v4}}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+ :call_{fname}_try_start
+ invoke-virtual {{v0}}, L{farg};->callSupers()V
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Throwable; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v6
+ invoke-virtual {{v6,v4}}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def get_expected(self):
+ """
+ Get the expected output calling this function.
+ """
+ return "\n".join(self.farg.get_expected())
+
+ def get_name(self):
+ """
+ Get the name of this function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def __str__(self):
+ """
+ Print the smali code of this function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname = self.get_name(),
+ farg = self.farg.get_name())
+
+class InterfaceCallResponse(Enum):
+ """
+ An enumeration of all the different types of responses to an interface call we can have
+ """
+ NoError = 0
+ NoSuchMethodError = 1
+ AbstractMethodError = 2
+ IncompatibleClassChangeError = 3
+
+ def get_output_format(self):
+ if self == InterfaceCallResponse.NoError:
+ return "No exception thrown for {iface_name}.super.call() on {tree}\n"
+ elif self == InterfaceCallResponse.AbstractMethodError:
+ return "AbstractMethodError thrown for {iface_name}.super.call() on {tree}\n"
+ elif self == InterfaceCallResponse.NoSuchMethodError:
+ return "NoSuchMethodError thrown for {iface_name}.super.call() on {tree}\n"
+ else:
+ return "IncompatibleClassChangeError thrown for {iface_name}.super.call() on {tree}\n"
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test interface super behavior.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public class {class_name} implements {ifaces} {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+# public void call() {{
+# throw new Error("{class_name}.call(v) should never get called!");
+# }}
+.method public call()V
+ .locals 2
+ new-instance v0, Ljava/lang/Error;
+ const-string v1, "{class_name}.call(v) should never get called!"
+ invoke-direct {{v0, v1}}, Ljava/lang/Error;-><init>(Ljava/lang/String;)V
+ throw v0
+.end method
+
+# public void callSupers() {{
+.method public callSupers()V
+ .locals 4
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {super_calls}
+
+ return-void
+.end method
+# }}
+
+# }}
+"""
+ SUPER_CALL_TEMPLATE = """
+# try {{
+# System.out.println("Calling {iface_name}.super.call() on {tree}");
+# {iface_name}.super.call();
+# System.out.println("No exception thrown for {iface_name}.super.call() on {tree}");
+# }} catch (AbstractMethodError ame) {{
+# System.out.println("AbstractMethodError thrown for {iface_name}.super.call() on {tree}");
+# }} catch (NoSuchMethodError nsme) {{
+# System.out.println("NoSuchMethodError thrown for {iface_name}.super.call() on {tree}");
+# }} catch (IncompatibleClassChangeError icce) {{
+# System.out.println("IncompatibleClassChangeError thrown for {iface_name}.super.call() on {tree}");
+# }} catch (Throwable t) {{
+# System.out.println("Unknown error thrown for {iface_name}.super.call() on {tree}");
+# throw t;
+# }}
+ :call_{class_name}_{iface_name}_try_start
+ const-string v1, "Calling {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ invoke-super {{p0}}, L{iface_name};->call()V
+ const-string v1, "No exception thrown for {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :call_{class_name}_{iface_name}_end
+ :call_{class_name}_{iface_name}_try_end
+ .catch Ljava/lang/AbstractMethodError; {{:call_{class_name}_{iface_name}_try_start .. :call_{class_name}_{iface_name}_try_end}} :AME_{class_name}_{iface_name}_start
+ .catch Ljava/lang/NoSuchMethodError; {{:call_{class_name}_{iface_name}_try_start .. :call_{class_name}_{iface_name}_try_end}} :NSME_{class_name}_{iface_name}_start
+ .catch Ljava/lang/IncompatibleClassChangeError; {{:call_{class_name}_{iface_name}_try_start .. :call_{class_name}_{iface_name}_try_end}} :ICCE_{class_name}_{iface_name}_start
+ .catch Ljava/lang/Throwable; {{:call_{class_name}_{iface_name}_try_start .. :call_{class_name}_{iface_name}_try_end}} :error_{class_name}_{iface_name}_start
+ :AME_{class_name}_{iface_name}_start
+ const-string v1, "AbstractMethodError thrown for {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :call_{class_name}_{iface_name}_end
+ :NSME_{class_name}_{iface_name}_start
+ const-string v1, "NoSuchMethodError thrown for {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :call_{class_name}_{iface_name}_end
+ :ICCE_{class_name}_{iface_name}_start
+ const-string v1, "IncompatibleClassChangeError thrown for {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ goto :call_{class_name}_{iface_name}_end
+ :error_{class_name}_{iface_name}_start
+ move-exception v2
+ const-string v1, "Unknown error thrown for {iface_name}.super.call() on {tree}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ throw v2
+ :call_{class_name}_{iface_name}_end
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ OUTPUT_PREFIX = "Calling {iface_name}.super.call() on {tree}\n"
+
+ def __init__(self, ifaces):
+ """
+ Initialize this test class which implements the given interfaces
+ """
+ self.ifaces = ifaces
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iface_tree}]".format(class_name = self.class_name,
+ iface_tree = print_tree(self.ifaces))
+
+ def __iter__(self):
+ """
+ Step through all interfaces implemented transitively by this class
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def get_expected(self):
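+    # Mirrors the println sequence of callSupers(): the "Calling ..." line,
+    # the transitive output of the invoked default method, then the result
+    # line for each implemented interface.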
+ for iface in self.ifaces:
+ yield self.OUTPUT_PREFIX.format(iface_name = iface.get_name(), tree = self.get_tree())
+ yield from iface.get_expected()
+ yield iface.get_response().get_output_format().format(iface_name = iface.get_name(),
+ tree = self.get_tree())
+
+ def __str__(self):
+ """
+ Print the smali code of this class.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ super_template = self.SUPER_CALL_TEMPLATE
+ super_calls = "\n".join(super_template.format(iface_name = iface.get_name(),
+ class_name = self.get_name(),
+ tree = self.get_tree()) for iface in self.ifaces)
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ ifaces = j_ifaces,
+ implements_spec = s_ifaces,
+ tree = self.get_tree(),
+ class_name = self.class_name,
+ super_calls = super_calls)
+
+class InterfaceType(Enum):
+ """
+ An enumeration of all the different types of interfaces we can have.
+
+ default: It has a default method
+ abstract: It has a method declared but not defined
+ empty: It does not have the method
+ """
+ default = 0
+ abstract = 1
+ empty = 2
+
+ def get_suffix(self):
+ if self == InterfaceType.default:
+ return "_DEFAULT"
+ elif self == InterfaceType.abstract:
+ return "_ABSTRACT"
+ elif self == InterfaceType.empty:
+ return "_EMPTY"
+ else:
+ raise TypeError("Interface type had illegal value.")
+
+class ConflictInterface:
+ """
+ A singleton representing a conflict of default methods.
+ """
+
+ def is_conflict(self):
+ """
+ Returns true if this is a conflict interface and calling the method on this interface will
+ result in an IncompatibleClassChangeError.
+ """
+ return True
+
+ def is_abstract(self):
+ """
+ Returns true if this is an abstract interface and calling the method on this interface will
+ result in an AbstractMethodError.
+ """
+ return False
+
+ def is_empty(self):
+ """
+    Returns true if this is an empty interface and calling the method on this interface will
+ result in a NoSuchMethodError.
+ """
+ return False
+
+ def is_default(self):
+ """
+ Returns true if this is a default interface and calling the method on this interface will
+ result in a method actually being called.
+ """
+ return False
+
+ def get_response(self):
+ return InterfaceCallResponse.IncompatibleClassChangeError
+
+CONFLICT_TYPE = ConflictInterface()
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+
+{func}
+
+# }}
+"""
+
+ SUPER_CALL_TEMPLATE = TestClass.SUPER_CALL_TEMPLATE
+ OUTPUT_PREFIX = TestClass.OUTPUT_PREFIX
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default void call() {{
+.method public call()V
+ .locals 4
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {super_calls}
+
+ return-void
+.end method
+# }}
+"""
+
+ ABSTRACT_FUNC_TEMPLATE = """
+# public void call();
+.method public abstract call()V
+.end method
+"""
+
+ EMPTY_FUNC_TEMPLATE = """"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ def __init__(self, ifaces, iface_type, full_name = None):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = sorted(ifaces)
+ self.iface_type = iface_type
+ if full_name is None:
+ end = self.iface_type.get_suffix()
+ self.class_name = "INTERFACE_"+gensym()+end
+ else:
+ self.class_name = full_name
+
+ def get_specific_version(self, v):
+ """
+ Returns a copy of this interface of the given type for use in partial compilation.
+ """
+ return TestInterface(self.ifaces, v, full_name = self.class_name)
+
+ def get_super_types(self):
+ """
+ Returns a set of all the supertypes of this interface
+ """
+ return set(i2 for i2 in self)
+
+ def is_conflict(self):
+ """
+ Returns true if this is a conflict interface and calling the method on this interface will
+ result in an IncompatibleClassChangeError.
+ """
+ return False
+
+ def is_abstract(self):
+ """
+ Returns true if this is an abstract interface and calling the method on this interface will
+ result in an AbstractMethodError.
+ """
+ return self.iface_type == InterfaceType.abstract
+
+ def is_empty(self):
+ """
+    Returns true if this is an empty interface and calling the method on this interface will
+ result in a NoSuchMethodError.
+ """
+ return self.iface_type == InterfaceType.empty
+
+ def is_default(self):
+ """
+ Returns true if this is a default interface and calling the method on this interface will
+ result in a method actually being called.
+ """
+ return self.iface_type == InterfaceType.default
+
+ def get_expected(self):
+ response = self.get_response()
+ if response == InterfaceCallResponse.NoError:
+ for iface in self.ifaces:
+ if self.is_default():
+ yield self.OUTPUT_PREFIX.format(iface_name = iface.get_name(), tree = self.get_tree())
+ yield from iface.get_expected()
+ if self.is_default():
+ yield iface.get_response().get_output_format().format(iface_name = iface.get_name(),
+ tree = self.get_tree())
+
+ def get_response(self):
+ if self.is_default():
+ return InterfaceCallResponse.NoError
+ elif self.is_abstract():
+ return InterfaceCallResponse.AbstractMethodError
+ elif len(self.ifaces) == 0:
+ return InterfaceCallResponse.NoSuchMethodError
+ else:
+ return self.get_called().get_response()
+
+ def get_called(self):
+ """
+ Returns the interface that will be called when the method on this class is invoked or
+ CONFLICT_TYPE if there is no interface that will be called.
+ """
+ if not self.is_empty() or len(self.ifaces) == 0:
+ return self
+ else:
+ best = self
+ for super_iface in self.ifaces:
+ super_best = super_iface.get_called()
+ if super_best.is_conflict():
+ return CONFLICT_TYPE
+ elif best.is_default():
+ if super_best.is_default():
+ return CONFLICT_TYPE
+ elif best.is_abstract():
+ if super_best.is_default():
+ best = super_best
+ else:
+ assert best.is_empty()
+ best = super_best
+ return best
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def __str__(self):
+ """
+ Print the smali code of this interface.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ if self.is_default():
+ super_template = self.SUPER_CALL_TEMPLATE
+      super_calls = "\n".join(super_template.format(iface_name = iface.get_name(),
+                                                    class_name = self.get_name(),
+                                                    tree = self.get_tree()) for iface in self.ifaces)
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(super_calls = super_calls)
+ elif self.is_abstract():
+ funcs = self.ABSTRACT_FUNC_TEMPLATE.format()
+ else:
+      funcs = self.EMPTY_FUNC_TEMPLATE
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ func = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def print_tree(ifaces):
+ """
+ Prints a list of iface trees
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+# The deduplicated output of subtree_sizes for each size up to
+# MAX_IFACE_DEPTH.
+SUBTREES = [set(tuple(sorted(l)) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_test_classes():
+ """
+ Yield all the test classes with the different interface trees
+ """
+ for num in range(1, MAX_IFACE_DEPTH + 1):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestClass(supers)
+
+def create_interface_trees(num):
+ """
+ Yield all the interface trees up to 'num' depth.
+ """
+ if num == 0:
+ for iftype in InterfaceType:
+ yield TestInterface(tuple(), iftype)
+ return
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ for iftype in InterfaceType:
+ yield TestInterface(supers, iftype)
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for clazz in create_test_classes():
+ classes.add(clazz)
+ for i in clazz:
+ classes.add(i)
+ mc.add_test(clazz)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index f74a516486..b922b4576e 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -30,6 +30,7 @@ LIBARTTEST_COMMON_SRC_FILES := \
051-thread/thread_test.cc \
117-nopatchoat/nopatchoat.cc \
1337-gc-coverage/gc_coverage.cc \
+ 136-daemon-jni-shutdown/daemon_jni_shutdown.cc \
137-cfi/cfi.cc \
139-register-natives/regnative.cc \
141-class-unload/jni_unload.cc \
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8c5ff0a938..25560f9d3d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -239,7 +239,9 @@ TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
961-default-iface-resolution-generated \
964-default-iface-init-generated \
- 968-default-partial-compile-generated
+ 968-default-partial-compile-generated \
+ 970-iface-super-resolution-generated \
+ 971-iface-super-partial-compile-generated
# Check if we have python3 to run our tests.
ifeq ($(wildcard /usr/bin/python3),)
@@ -267,6 +269,16 @@ endif
TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
+# 143-string-value tests for a LOG(E) tag, which is only supported on host.
+TEST_ART_BROKEN_TARGET_RUN_TESTS := \
+ 143-string-value \
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TARGET_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_TARGET_RUN_TESTS :=
+
# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
117-nopatchoat \
@@ -538,6 +550,11 @@ TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
484-checker-register-hints \
537-checker-arraycopy
+# Tests that should fail in the read barrier configuration with JIT.
+# 141: Disabled because of intermittent failures on the ART Buildbot (b/25866001).
+TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := \
+ 141-class-unload
+
ifeq ($(ART_USE_READ_BARRIER),true)
ifneq (,$(filter default,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
@@ -552,10 +569,18 @@ ifeq ($(ART_USE_READ_BARRIER),true)
$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
$(TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
+
+ ifneq (,$(filter jit,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
+ $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
+ $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ endif
endif
TEST_ART_BROKEN_DEFAULT_READ_BARRIER_RUN_TESTS :=
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
+TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
# Tests that should fail in the heap poisoning configuration with the default (Quick) compiler.
# 137: Quick has no support for read barriers and punts to the
@@ -588,8 +613,7 @@ endif
TEST_ART_BROKEN_DEFAULT_HEAP_POISONING_RUN_TESTS :=
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
-
-# Tests broken by multi-image. b/26317072
+# Tests broken by multi-image.
TEST_ART_BROKEN_MULTI_IMAGE_RUN_TESTS := \
476-checker-ctor-memory-barrier \
530-checker-lse
@@ -663,7 +687,8 @@ ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
@@ -671,7 +696,8 @@ ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
endif
# Create a rule to build and run a test following the form:
@@ -862,20 +888,20 @@ define define-test-art-run-test
ifeq ($(9),multiimage)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
run_test_options += --multi-image
- ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
- else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
- endif
+ ifeq ($(1),host)
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
+ else
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
+ endif
else
ifeq ($(9),multipicimage)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
- run_test_options += --pic-image --multi-image
- ifeq ($(1),host)
- prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
- else
- prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
- endif
+ run_test_options += --pic-image --multi-image
+ ifeq ($(1),host)
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
+ else
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
+ endif
else
$$(error found $(9) expected $(IMAGE_TYPES))
endif
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index dacb7b9d3b..e004b6cff4 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -361,9 +361,7 @@ fi
dex2oat_cmdline="true"
mkdir_cmdline="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA"
-# TODO: allow app-image to work with multi-image. b/26317072
-app_image=""
-# app_image="--app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g")"
+app_image="--app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g")"
if [ "$PREBUILD" = "y" ]; then
dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/dex2oatd \
diff --git a/test/run-test b/test/run-test
index ec34e0946a..033e2c6467 100755
--- a/test/run-test
+++ b/test/run-test
@@ -683,11 +683,6 @@ function arch_supports_read_barrier() {
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
- # Build Checker DEX files without dx's optimizations so the input to dex2oat
- # better resembles the Java source. We always build the DEX the same way, even
- # if Checker is not invoked and the test only runs the program.
- build_args="${build_args} --dx-option --no-optimize"
-
# Jack does not necessarily generate the same DEX output than dx. Because these tests depend
# on a particular DEX output, keep building them with dx for now (b/19467889).
USE_JACK="false"
@@ -738,7 +733,7 @@ fi
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
build_file_size_limit=2048
run_file_size_limit=2048
-if echo "$test_dir" | grep -Eq "(083|089|964)" > /dev/null; then
+if echo "$test_dir" | grep -Eq "(083|089|964|971)" > /dev/null; then
build_file_size_limit=5120
run_file_size_limit=5120
fi
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 02787fba43..9e02ce2f90 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -48,7 +48,7 @@ done
if [[ $mode == "host" ]]; then
make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
elif [[ $mode == "target" ]]; then
- make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb"
+ make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb libstdc++"
fi
echo "Executing $make_command"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 80d2279fc4..f2c810a8b8 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -171,12 +171,6 @@
bug: 25437292
},
{
- description: "Assertion failing on the concurrent collector configuration.",
- result: EXEC_FAILED,
- names: ["jsr166.LinkedTransferQueueTest#testTransfer2"],
- bug: 25883050
-},
-{
description: "Failing tests after enso move.",
result: EXEC_FAILED,
bug: 26326992,
@@ -213,5 +207,57 @@
bug: 26395656,
modes: [device],
names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getOutputStream"]
+},
+{
+ description: "Missing resource in classpath",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetBoolean",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetByteArray",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetDouble",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetFloat",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetInt",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetLong",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testKeys",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testNodeExists",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPut",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutBoolean",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutByteArray",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutDouble",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutFloat",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutInt",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutLong",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemove",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemoveNode",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync",
+ "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding",
+ "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles",
+ "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
+ "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
+},
+{
+ description: "libnativehelper_compat_libc++ loading issue",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["dalvik.system.JniTest#testGetSuperclass",
+ "dalvik.system.JniTest#testPassingBooleans",
+ "dalvik.system.JniTest#testPassingBytes",
+ "dalvik.system.JniTest#testPassingChars",
+ "dalvik.system.JniTest#testPassingClass",
+ "dalvik.system.JniTest#testPassingDoubles",
+ "dalvik.system.JniTest#testPassingFloats",
+ "dalvik.system.JniTest#testPassingInts",
+ "dalvik.system.JniTest#testPassingLongs",
+ "dalvik.system.JniTest#testPassingObjectReferences",
+ "dalvik.system.JniTest#testPassingShorts",
+ "dalvik.system.JniTest#testPassingThis"]
}
]
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
new file mode 100644
index 0000000000..2cb2c50257
--- /dev/null
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -0,0 +1,35 @@
+/*
+ * This file contains expectations for ART's buildbot's concurrent collector
+ * configurations. The purpose of this file is to temporarily and quickly list
+ * failing tests without breaking the bots on the CC configurations, until they
+ * are fixed or until the libcore expectation files get properly updated. The
+ * script that uses this file is art/tools/run-libcore-tests.sh.
+ *
+ * It is also used to enable AOSP experiments without interfering with CTS's
+ * expectations.
+ */
+
+[
+{
+ description: "Assertion failing on the concurrent collector configuration.",
+ result: EXEC_FAILED,
+ names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
+ "jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
+ bug: 25883050
+},
+{
+ description: "libcore.java.lang.OldSystemTest#test_gc failure on armv8-concurrent-collector.",
+ result: EXEC_FAILED,
+ names: ["libcore.java.lang.OldSystemTest#test_gc"],
+ bug: 26155567
+},
+{
+ description: "TimeoutException on host-{x86,x86-64}-concurrent-collector",
+ result: EXEC_FAILED,
+ modes: [host],
+ names: ["libcore.java.util.zip.GZIPOutputStreamTest#testSyncFlushEnabled",
+ "libcore.java.util.zip.ZipFileTest#testZipFileWithLotsOfEntries",
+ "libcore.java.util.zip.ZipInputStreamTest#testLongMessage"],
+ bug: 26507762
+}
+]
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 11ed8b94be..f346239763 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -32,6 +32,12 @@ if [ ! -f $test_jar ]; then
exit 1
fi
+expectations="--expectations art/tools/libcore_failures.txt"
+if [ "x$ART_USE_READ_BARRIER" = xtrue ]; then
+ # Tolerate some more failures on the concurrent collector configurations.
+ expectations="$expectations --expectations art/tools/libcore_failures_concurrent_collector.txt"
+fi
+
emulator="no"
if [ "$ANDROID_SERIAL" = "emulator-5554" ]; then
emulator="yes"
@@ -105,4 +111,4 @@ vogar_args="$vogar_args --timeout 480"
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --vm-arg -Xusejit:true --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true $expectations --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}