Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk | 4
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc | 19
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 8
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64_test.cc | 2
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 101
-rw-r--r--  runtime/arch/mips64/registers_mips64.h | 1
-rw-r--r--  runtime/art_method-inl.h | 34
-rw-r--r--  runtime/art_method.h | 11
-rw-r--r--  runtime/base/stl_util.h | 14
-rw-r--r--  runtime/class_linker.cc | 147
-rw-r--r--  runtime/class_linker.h | 13
-rw-r--r--  runtime/class_linker_test.cc | 9
-rw-r--r--  runtime/class_table-inl.h | 16
-rw-r--r--  runtime/class_table.cc | 19
-rw-r--r--  runtime/class_table.h | 17
-rw-r--r--  runtime/debugger.cc | 2
-rw-r--r--  runtime/fault_handler.cc | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 2
-rw-r--r--  runtime/gc/heap.cc | 3
-rw-r--r--  runtime/gc/reference_processor.cc | 4
-rw-r--r--  runtime/gc/reference_queue.cc | 29
-rw-r--r--  runtime/gc/reference_queue.h | 21
-rw-r--r--  runtime/gc/reference_queue_test.cc | 17
-rw-r--r--  runtime/gc/space/image_space.cc | 23
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 2
-rw-r--r--  runtime/interpreter/interpreter.cc | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/alt_stub.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/bincmp.S | 37
-rw-r--r--  runtime/interpreter/mterp/arm64/binop.S | 33
-rw-r--r--  runtime/interpreter/mterp/arm64/binop2addr.S | 30
-rw-r--r--  runtime/interpreter/mterp/arm64/binopLit16.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm64/binopLit8.S | 30
-rw-r--r--  runtime/interpreter/mterp/arm64/binopWide.S | 30
-rw-r--r--  runtime/interpreter/mterp/arm64/binopWide2addr.S | 29
-rw-r--r--  runtime/interpreter/mterp/arm64/entry.S | 63
-rw-r--r--  runtime/interpreter/mterp/arm64/fallback.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm64/fbinop.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/fbinop2addr.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/fcmp.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm64/footer.S | 170
-rw-r--r--  runtime/interpreter/mterp/arm64/funopNarrow.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/funopNarrower.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/funopWide.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/funopWider.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/header.S | 288
-rw-r--r--  runtime/interpreter/mterp/arm64/invoke.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_add_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_object.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aget_wide.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_and_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_object.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_aput_wide.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm64/op_array_length.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_check_cast.S | 16
-rw-r--r--  runtime/interpreter/mterp/arm64/op_cmp_long.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_cmpg_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_cmpg_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_cmpl_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_cmpl_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_16.S | 7
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_4.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_class.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_high16.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_string.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_string_jumbo.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_wide.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_wide_16.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_wide_32.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm64/op_const_wide_high16.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_div_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_double_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_double_to_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_double_to_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_fill_array_data.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_filled_new_array.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/op_filled_new_array_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_float_to_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_float_to_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_float_to_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_goto.S | 28
-rw-r--r--  runtime/interpreter/mterp/arm64/op_goto_16.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm64/op_goto_32.S | 32
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_eq.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_eqz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_ge.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_gez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_gt.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_gtz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_le.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_lez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_lt.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_ltz.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_ne.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_if_nez.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget.S | 25
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_byte_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_char_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_object_quick.S | 15
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_quick.S | 15
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_short_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_wide.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iget_wide_quick.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_instance_of.S | 23
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_int_to_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_direct.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_direct_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_interface.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_interface_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_static.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_static_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_super.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_super_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_virtual.S | 8
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput.S | 21
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_byte_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_char_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_object.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_object_quick.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_quick.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_short_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_wide.S | 15
-rw-r--r--  runtime/interpreter/mterp/arm64/op_iput_wide_quick.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_long_to_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_long_to_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_long_to_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_monitor_enter.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_monitor_exit.S | 17
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_16.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_exception.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_from16.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_object_16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_object_from16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_result.S | 14
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_result_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_result_wide.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_wide.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_wide_16.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_move_wide_from16.S | 9
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_int.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_int_2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_int_lit16.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_int_lit8.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_mul_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_neg_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_neg_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_neg_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_neg_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_new_array.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/op_new_instance.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_nop.S | 3
-rw-r--r--  runtime/interpreter/mterp/arm64/op_not_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_not_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_or_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_packed_switch.S | 39
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_double.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_double_2addr.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_float.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_float_2addr.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rem_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_return.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/op_return_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_return_void.S | 12
-rw-r--r--  runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm64/op_return_wide.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rsub_int.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget.S | 27
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_object.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sget_wide.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shl_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shl_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shl_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shl_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shl_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shr_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shr_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shr_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shr_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_shr_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sparse_switch.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_boolean.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_byte.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_char.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_object.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_short.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sput_wide.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_double.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_double_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_float.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_float_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_sub_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_throw.S | 10
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_3e.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_3f.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_40.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_41.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_42.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_43.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_73.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_79.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_7a.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f3.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f4.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f5.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f6.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f7.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_f9.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fa.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fb.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fc.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fd.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_fe.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_unused_ff.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_ushr_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_ushr_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_int.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_int_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_int_lit16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_int_lit8.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_long.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/op_xor_long_2addr.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm64/shiftWide.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm64/shiftWide2addr.S | 16
-rw-r--r--  runtime/interpreter/mterp/arm64/unop.S | 20
-rw-r--r--  runtime/interpreter/mterp/arm64/unopWide.S | 18
-rw-r--r--  runtime/interpreter/mterp/arm64/unused.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm64/zcmp.S | 33
-rw-r--r--  runtime/interpreter/mterp/config_arm64 | 513
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm64.S | 11726
-rwxr-xr-x  runtime/interpreter/mterp/rebuild.sh | 2
-rw-r--r--  runtime/jit/jit.cc | 2
-rw-r--r--  runtime/mirror/array-inl.h | 12
-rw-r--r--  runtime/mirror/array.h | 8
-rw-r--r--  runtime/mirror/class-inl.h | 20
-rw-r--r--  runtime/mirror/class.h | 19
-rw-r--r--  runtime/mirror/iftable.h | 5
-rw-r--r--  runtime/mirror/object-inl.h | 59
-rw-r--r--  runtime/mirror/object.h | 25
-rw-r--r--  runtime/mirror/reference-inl.h | 8
-rw-r--r--  runtime/mirror/reference.h | 30
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 2
-rw-r--r--  runtime/read_barrier-inl.h | 148
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/thread.h | 25
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 16
328 files changed, 14798 insertions(+), 571 deletions(-)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7bf6d21040..288f95e89b 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -263,7 +263,8 @@ LIBART_TARGET_SRC_FILES_arm := \
arch/arm/fault_handler_arm.cc
LIBART_TARGET_SRC_FILES_arm64 := \
- interpreter/mterp/mterp_stub.cc \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_arm64.S \
arch/arm64/context_arm64.cc \
arch/arm64/entrypoints_init_arm64.cc \
arch/arm64/jni_entrypoints_arm64.S \
@@ -508,6 +509,7 @@ endif
ifeq ($$(art_target_or_host),target)
$$(eval $$(call set-target-local-clang-vars))
$$(eval $$(call set-target-local-cflags-vars,$(2)))
+ LOCAL_CLANG_arm64 := true
LOCAL_CFLAGS_$(DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
LOCAL_CFLAGS_$(2ND_DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
else # host
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 5c8ff8f300..4db941174d 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -113,6 +113,25 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pShrLong = nullptr;
qpoints->pUshrLong = nullptr;
+ // More math.
+ qpoints->pCos = cos;
+ qpoints->pSin = sin;
+ qpoints->pAcos = acos;
+ qpoints->pAsin = asin;
+ qpoints->pAtan = atan;
+ qpoints->pAtan2 = atan2;
+ qpoints->pCbrt = cbrt;
+ qpoints->pCosh = cosh;
+ qpoints->pExp = exp;
+ qpoints->pExpm1 = expm1;
+ qpoints->pHypot = hypot;
+ qpoints->pLog = log;
+ qpoints->pLog10 = log10;
+ qpoints->pNextAfter = nextafter;
+ qpoints->pSinh = sinh;
+ qpoints->pTan = tan;
+ qpoints->pTanh = tanh;
+
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
qpoints->pStringCompareTo = art_quick_string_compareto;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 805131f1fd..abd7e83248 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -66,14 +66,6 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
return fix_cortex_a53_843419_;
}
- // NOTE: This flag can be tunned on a CPU basis. In general all ARMv8 CPUs
- // should prefer the Acquire-Release semantics over the explicit DMBs when
- // handling load/store-volatile. For a specific use case see the ARM64
- // Optimizing backend.
- bool PreferAcquireRelease() const {
- return true;
- }
-
virtual ~Arm64InstructionSetFeatures() {}
protected:
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 599f24ed30..027e59c57a 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -30,8 +30,6 @@ TEST(Arm64InstructionSetFeaturesTest, Arm64Features) {
EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 3U);
- // See the comments in instruction_set_features_arm64.h.
- EXPECT_TRUE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
}
} // namespace art
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 66c8aadf33..d264c9baaf 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1366,7 +1366,106 @@ END \name
.endm
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+
+ # Fast path rosalloc allocation
+ # a0: type_idx
+ # a1: ArtMethod*
+ # s1: Thread::Current
+ # -----------------------------
+ # t0: class
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # a4: thread stack bottom offset
+ # v0: free list head
+ #
+ # a5, a6 : temps
+
+ ld $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1) # Load dex cache resolved types array.
+
+ dsll $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
+ daddu $a5, $t0, $a5 # Compute the index.
+ lwu $t0, 0($a5) # Load class (t0).
+ beqzc $t0, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ li $a6, MIRROR_CLASS_STATUS_INITIALIZED
+ lwu $a5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
+ bnec $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ # Add a fake dependence from the following access flag and size loads to the status load. This
+ # is to prevent those loads from being reordered above the status load and reading wrong values.
+ xor $a5, $a5, $a5
+ daddu $t0, $t0, $a5
+
+ lwu $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if the access flags
+ li $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # include kAccClassIsFinalizable.
+ and $a6, $a5, $a6
+ bnezc $a6, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
+ ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
+ bgeuc $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ lwu $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
+ li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation.
+ bltuc $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ # Compute the rosalloc bracket index from the size: align the size up to the rosalloc bracket
+ # quantum size, divide by the quantum size, and subtract 1.
+ daddiu $t1, $t1, -1 # Decrease obj size and shift right by
+ dsrl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # quantum.
+
+ dsll $t2, $t1, POINTER_SIZE_SHIFT
+ daddu $t2, $t2, $s1
+ ld $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+ ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqzc $v0, .Lart_quick_alloc_object_rosalloc_slow_path
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+ ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the free list's next pointer. The
+ # offsets are asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $t0
+ sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+ sd $v0, 0($t3)
+ daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+ lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $a5, $a5, -1
+ sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ .cpreturn # Restore gp from t8 in branch delay slot.
+
+.Lart_quick_alloc_object_rosalloc_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ jal artAllocObjectFromCodeRosAlloc
+ move $a2, $s1 # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+END art_quick_alloc_object_rosalloc
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index 1d07d47a7c..b027c955bf 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -61,6 +61,7 @@ enum GpuRegister {
RA = 31, // Return address.
TR = S1, // ART Thread Register
TMP = T8, // scratch register (in addition to AT)
+ TMP2 = T3, // scratch register (in addition to AT, reserved for assembler)
kNumberOfGpuRegisters = 32,
kNoGpuRegister = -1 // Signals an illegal register.
};
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 74eb7227dc..28540c8437 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -41,17 +41,15 @@
namespace art {
+template <ReadBarrierOption kReadBarrierOption>
inline mirror::Class* ArtMethod::GetDeclaringClassUnchecked() {
GcRootSource gc_root_source(this);
- return declaring_class_.Read(&gc_root_source);
-}
-
-inline mirror::Class* ArtMethod::GetDeclaringClassNoBarrier() {
- return declaring_class_.Read<kWithoutReadBarrier>();
+ return declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
}
+template <ReadBarrierOption kReadBarrierOption>
inline mirror::Class* ArtMethod::GetDeclaringClass() {
- mirror::Class* result = GetDeclaringClassUnchecked();
+ mirror::Class* result = GetDeclaringClassUnchecked<kReadBarrierOption>();
if (kIsDebugBuild) {
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
@@ -79,24 +77,28 @@ inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class,
// AssertSharedHeld doesn't work in GetAccessFlags, so use a NO_THREAD_SAFETY_ANALYSIS helper.
// TODO: Figure out why ASSERT_SHARED_CAPABILITY doesn't work.
-ALWAYS_INLINE
-static inline void DoGetAccessFlagsHelper(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
- CHECK(method->IsRuntimeMethod() || method->GetDeclaringClass()->IsIdxLoaded() ||
- method->GetDeclaringClass()->IsErroneous());
+template <ReadBarrierOption kReadBarrierOption>
+ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
+ NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(method->IsRuntimeMethod() ||
+ method->GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
+ method->GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
}
+template <ReadBarrierOption kReadBarrierOption>
inline uint32_t ArtMethod::GetAccessFlags() {
if (kIsDebugBuild) {
Thread* self = Thread::Current();
if (!Locks::mutator_lock_->IsSharedHeld(self)) {
ScopedObjectAccess soa(self);
- CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
- GetDeclaringClass()->IsErroneous());
+ CHECK(IsRuntimeMethod() ||
+ GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
+ GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
} else {
// We cannot use SOA in this case. We might be holding the lock, but may not be in the
// runnable state (e.g., during GC).
Locks::mutator_lock_->AssertSharedHeld(self);
- DoGetAccessFlagsHelper(this);
+ DoGetAccessFlagsHelper<kReadBarrierOption>(this);
}
}
return access_flags_;
@@ -469,7 +471,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
template <typename Visitor>
inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor) {
- mirror::Class* old_class = GetDeclaringClassNoBarrier();
+ mirror::Class* old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
mirror::Class* new_class = visitor(old_class);
if (old_class != new_class) {
SetDeclaringClass(new_class);
@@ -486,9 +488,9 @@ inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor) {
}
}
-template <typename Visitor>
+template <ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor) {
- if (IsNative()) {
+ if (IsNative<kReadBarrierOption>()) {
const void* old_native_code = GetEntryPointFromJni();
const void* new_native_code = visitor(old_native_code);
if (old_native_code != new_native_code) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index a020e9d17e..078a978505 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -57,11 +57,10 @@ class ArtMethod FINAL {
jobject jlr_method)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier()
- SHARED_REQUIRES(Locks::mutator_lock_);
-
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -77,6 +76,7 @@ class ArtMethod FINAL {
// Note: GetAccessFlags acquires the mutator lock in debug mode to check that it is not called for
// a proxy method.
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE uint32_t GetAccessFlags();
void SetAccessFlags(uint32_t new_access_flags) {
@@ -154,8 +154,9 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccDefault) != 0;
}
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsNative() {
- return (GetAccessFlags() & kAccNative) != 0;
+ return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
}
bool IsFastNative() {
@@ -485,7 +486,7 @@ class ArtMethod FINAL {
SHARED_REQUIRES(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
- template <typename Visitor>
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor);
protected:
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index ad03c319d9..a53dcea2d7 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -156,6 +156,20 @@ struct CStringLess {
}
};
+// 32-bit FNV-1a hash function suitable for std::unordered_map.
+// It can be used with any container which works with range-based for loop.
+// See http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
+template <typename Vector>
+struct FNVHash {
+ size_t operator()(const Vector& vector) const {
+ uint32_t hash = 2166136261u;
+ for (const auto& value : vector) {
+ hash = (hash ^ value) * 16777619u;
+ }
+ return hash;
+ }
+};
+
// Use to suppress type deduction for a function argument.
// See std::identity<> for more background:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1856.html#20.2.2 - move/forward helpers
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0667e23898..04fe79aeb5 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1182,11 +1182,15 @@ class VerifyClassInTableArtMethodVisitor : public ArtMethodVisitor {
ClassTable* const table_;
};
-void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
+bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches,
- bool added_class_table) {
+ bool added_class_table,
+ bool* out_forward_dex_cache_array,
+ std::string* out_error_msg) {
+ DCHECK(out_forward_dex_cache_array != nullptr);
+ DCHECK(out_error_msg != nullptr);
Thread* const self = Thread::Current();
gc::Heap* const heap = Runtime::Current()->GetHeap();
const ImageHeader& header = space->GetImageHeader();
@@ -1194,8 +1198,11 @@ void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
// class loader fields.
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
ClassTable* table = InsertClassTableForClassLoader(class_loader.Get());
- // TODO: Store class table in the image to avoid manually adding the classes.
- for (int32_t i = 0, num_dex_caches = dex_caches->GetLength(); i < num_dex_caches; i++) {
+ // Dex cache array fixup is all or nothing; we must reject app images that mix the two cases
+ // since we rely on clobbering the dex cache arrays in the image to forward to the bss.
+ size_t num_dex_caches_with_bss_arrays = 0;
+ const size_t num_dex_caches = dex_caches->GetLength();
+ for (size_t i = 0; i < num_dex_caches; i++) {
mirror::DexCache* const dex_cache = dex_caches->Get(i);
const DexFile* const dex_file = dex_cache->GetDexFile();
// If the oat file expects the dex cache arrays to be in the BSS, then allocate there and
@@ -1209,22 +1216,22 @@ void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
CHECK_EQ(num_types, dex_cache->NumResolvedTypes());
CHECK_EQ(num_methods, dex_cache->NumResolvedMethods());
CHECK_EQ(num_fields, dex_cache->NumResolvedFields());
- if (dex_file->GetOatDexFile() != nullptr &&
- dex_file->GetOatDexFile()->GetDexCacheArrays() != nullptr) {
+ const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
+ if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) {
+ ++num_dex_caches_with_bss_arrays;
DexCacheArraysLayout layout(image_pointer_size_, dex_file);
- uint8_t* const raw_arrays = dex_file->GetOatDexFile()->GetDexCacheArrays();
- // The space is not yet visible to the GC, we can avoid the read barriers and use
- // std::copy_n.
+ uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays();
+ // The space is not yet visible to the GC, so we can avoid the read barriers and use std::copy_n.
if (num_strings != 0u) {
+ GcRoot<mirror::String>* const image_resolved_strings = dex_cache->GetStrings();
GcRoot<mirror::String>* const strings =
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
for (size_t j = 0; kIsDebugBuild && j < num_strings; ++j) {
DCHECK(strings[j].IsNull());
}
- std::copy_n(dex_cache->GetStrings(), num_strings, strings);
+ std::copy_n(image_resolved_strings, num_strings, strings);
dex_cache->SetStrings(strings);
}
-
if (num_types != 0u) {
GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes();
GcRoot<mirror::Class>* const types =
@@ -1282,6 +1289,12 @@ void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
// Update the class loader from the one in the image class loader to the one that loaded
// the app image.
klass->SetClassLoader(class_loader.Get());
+ // The resolved type could be from another dex cache; go through the dex cache just in
+ // case. May be null for array classes.
+ if (klass->GetDexCacheStrings() != nullptr) {
+ DCHECK(!klass->IsArrayClass());
+ klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
+ }
// If there are multiple dex caches, there may be the same class multiple times
// in different dex caches. Check for this since inserting will add duplicates
// otherwise.
@@ -1326,7 +1339,6 @@ void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
CHECK_EQ(table->LookupByDescriptor(super_class), super_class);
}
}
- DCHECK_EQ(klass->GetClassLoader(), class_loader.Get());
if (kIsDebugBuild) {
for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
@@ -1354,20 +1366,68 @@ void ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
}
}
}
- {
+ *out_forward_dex_cache_array = num_dex_caches_with_bss_arrays != 0;
+ if (*out_forward_dex_cache_array) {
+ if (num_dex_caches_with_bss_arrays != num_dex_caches) {
+ // Reject application image since we cannot forward only some of the dex cache arrays.
+ // TODO: We could get around this by having a dedicated forwarding slot. It should be an
+ // uncommon case.
+ *out_error_msg = StringPrintf("Dex caches in bss do not match total: %zu vs %zu",
+ num_dex_caches_with_bss_arrays,
+ num_dex_caches);
+ return false;
+ }
FixupArtMethodArrayVisitor visitor(header);
header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor, space->Begin(), sizeof(void*));
+ &visitor,
+ space->Begin(),
+ sizeof(void*));
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
}
if (kIsDebugBuild) {
ClassTable* const class_table = class_loader.Get()->GetClassTable();
VerifyClassInTableArtMethodVisitor visitor2(class_table);
header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor2, space->Begin(), sizeof(void*));
+ &visitor2,
+ space->Begin(),
+ sizeof(void*));
}
+ return true;
}
+class UpdateClassLoaderAndResolvedStringsVisitor {
+ public:
+ UpdateClassLoaderAndResolvedStringsVisitor(gc::space::ImageSpace* space,
+ mirror::ClassLoader* class_loader,
+ bool forward_strings)
+ : space_(space),
+ class_loader_(class_loader),
+ forward_strings_(forward_strings) {}
+
+ bool operator()(mirror::Class* klass) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (forward_strings_) {
+ GcRoot<mirror::String>* strings = klass->GetDexCacheStrings();
+ if (strings != nullptr) {
+ DCHECK(
+ space_->GetImageHeader().GetImageSection(ImageHeader::kSectionDexCacheArrays).Contains(
+ reinterpret_cast<uint8_t*>(strings) - space_->Begin()))
+ << "String dex cache array for " << PrettyClass(klass) << " is not in app image";
+ // Dex caches have already been updated, so take the strings pointer from there.
+ GcRoot<mirror::String>* new_strings = klass->GetDexCache()->GetStrings();
+ DCHECK_NE(strings, new_strings);
+ klass->SetDexCacheStrings(new_strings);
+ }
+ }
+ // Finally, update class loader.
+ klass->SetClassLoader(class_loader_);
+ return true;
+ }
+
+ gc::space::ImageSpace* const space_;
+ mirror::ClassLoader* const class_loader_;
+ const bool forward_strings_;
+};
+
bool ClassLinker::AddImageSpace(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1576,21 +1636,39 @@ bool ClassLinker::AddImageSpace(
if (app_image) {
GetOrCreateAllocatorForClassLoader(class_loader.Get()); // Make sure we have a linear alloc.
}
- if (class_table_section.Size() > 0u) {
- const uint64_t start_time2 = NanoTime();
+ ClassTable* class_table = nullptr;
+ {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader.Get());
- class_table->ReadFromMemory(space->Begin() + class_table_section.Offset());
- if (app_image) {
- class_table->SetClassLoader(class_loader.Get());
- } else {
- dex_cache_boot_image_class_lookup_required_ = false;
+ class_table = InsertClassTableForClassLoader(class_loader.Get());
+ if (class_table_section.Size() > 0u) {
+ const uint64_t start_time2 = NanoTime();
+ class_table->ReadFromMemory(space->Begin() + class_table_section.Offset());
+ if (!app_image) {
+ dex_cache_boot_image_class_lookup_required_ = false;
+ }
+ VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
+ added_class_table = true;
}
- VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
- added_class_table = true;
}
if (app_image) {
- UpdateAppImageClassLoadersAndDexCaches(space, class_loader, dex_caches, added_class_table);
+ bool forward_dex_cache_arrays = false;
+ if (!UpdateAppImageClassLoadersAndDexCaches(space,
+ class_loader,
+ dex_caches,
+ added_class_table,
+ /*out*/&forward_dex_cache_arrays,
+ /*out*/error_msg)) {
+ return false;
+ }
+ if (added_class_table) {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ // Update class loader and resolved strings. If added_class_table is false, the resolved
+ // strings were already updated in UpdateAppImageClassLoadersAndDexCaches.
+ UpdateClassLoaderAndResolvedStringsVisitor visitor(space,
+ class_loader.Get(),
+ forward_dex_cache_arrays);
+ class_table->Visit(visitor);
+ }
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -1677,7 +1755,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
void Visit(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
- if (!done_ && class_table != nullptr && !class_table->Visit(visitor_)) {
+ if (!done_ && class_table != nullptr && !class_table->Visit(*visitor_)) {
// If the visitor ClassTable returns false it means that we don't need to continue.
done_ = true;
}
@@ -1690,7 +1768,7 @@ class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
};
void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
- if (boot_class_table_.Visit(visitor)) {
+ if (boot_class_table_.Visit(*visitor)) {
VisitClassLoaderClassesVisitor loader_visitor(visitor);
VisitClassLoaders(&loader_visitor);
}
@@ -1713,7 +1791,7 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor) {
class GetClassesInToVector : public ClassVisitor {
public:
- bool Visit(mirror::Class* klass) OVERRIDE {
+ bool operator()(mirror::Class* klass) OVERRIDE {
classes_.push_back(klass);
return true;
}
@@ -1725,7 +1803,7 @@ class GetClassInToObjectArray : public ClassVisitor {
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -1746,16 +1824,17 @@ class GetClassInToObjectArray : public ClassVisitor {
void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) {
// TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem
// is avoiding duplicates.
+ Thread* const self = Thread::Current();
if (!kMovingClasses) {
+ ScopedAssertNoThreadSuspension nts(self, __FUNCTION__);
GetClassesInToVector accumulator;
VisitClasses(&accumulator);
for (mirror::Class* klass : accumulator.classes_) {
- if (!visitor->Visit(klass)) {
+ if (!visitor->operator()(klass)) {
return;
}
}
} else {
- Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
// We size the array assuming classes won't be added to the class table during the visit.
@@ -1783,7 +1862,7 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) {
// the class table grew then the loop repeats. If classes are created after the loop has
// finished then we don't visit.
mirror::Class* klass = classes->Get(i);
- if (klass != nullptr && !visitor->Visit(klass)) {
+ if (klass != nullptr && !visitor->operator()(klass)) {
return;
}
}
@@ -7154,7 +7233,7 @@ class DumpClassVisitor : public ClassVisitor {
public:
explicit DumpClassVisitor(int flags) : flags_(flags) {}
- bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
klass->DumpClass(LOG(ERROR), flags_);
return true;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3b4e9121d0..71fcf296bd 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -60,6 +60,13 @@ template<size_t kNumReferences> class PACKED(4) StackHandleScope;
enum VisitRootFlags : uint8_t;
+class ClassVisitor {
+ public:
+ virtual ~ClassVisitor() {}
+ // Return true to continue visiting.
+ virtual bool operator()(mirror::Class* klass) = 0;
+};
+
class ClassLoaderVisitor {
public:
virtual ~ClassLoaderVisitor() {}
@@ -1008,11 +1015,13 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
- void UpdateAppImageClassLoadersAndDexCaches(
+ bool UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches,
- bool added_class_table)
+ bool added_class_table,
+ bool* out_forward_dex_cache_array,
+ std::string* out_error_msg)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b86da9fbb5..3a0f3e5065 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -210,11 +210,10 @@ class ClassLinkerTest : public CommonRuntimeTest {
klass->GetDescriptor(&temp2)));
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
- if (klass->NumDirectMethods() == 1) {
- EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsClassInitializer());
- EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsDirect());
- } else {
- EXPECT_EQ(0U, klass->NumDirectMethods());
+ // Check that all direct methods are static (either <clinit> or a regular static method).
+ for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ EXPECT_TRUE(m.IsStatic());
+ EXPECT_TRUE(m.IsDirect());
}
} else {
if (!klass->IsSynthetic()) {
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index aef02b6d5d..e512906507 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -28,6 +28,9 @@ void ClassTable::VisitRoots(Visitor& visitor) {
visitor.VisitRoot(root.AddressWithoutBarrier());
}
}
+ for (GcRoot<mirror::Object>& root : dex_files_) {
+ visitor.VisitRoot(root.AddressWithoutBarrier());
+ }
}
template<class Visitor>
@@ -42,6 +45,19 @@ void ClassTable::VisitRoots(const Visitor& visitor) {
}
}
+template <typename Visitor>
+bool ClassTable::Visit(Visitor& visitor) {
+ for (ClassSet& class_set : classes_) {
+ for (GcRoot<mirror::Class>& root : class_set) {
+ if (!visitor(root.Read())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
} // namespace art
#endif // ART_RUNTIME_CLASS_TABLE_INL_H_
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 2a4f0e01af..afb0556e1e 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -73,17 +73,6 @@ mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* kl
return existing;
}
-bool ClassTable::Visit(ClassVisitor* visitor) {
- for (ClassSet& class_set : classes_) {
- for (GcRoot<mirror::Class>& root : class_set) {
- if (!visitor->Visit(root.Read())) {
- return false;
- }
- }
- }
- return true;
-}
-
size_t ClassTable::NumZygoteClasses() const {
size_t sum = 0;
for (size_t i = 0; i < classes_.size() - 1; ++i) {
@@ -183,12 +172,4 @@ size_t ClassTable::ReadFromMemory(uint8_t* ptr) {
return read_count;
}
-void ClassTable::SetClassLoader(mirror::ClassLoader* class_loader) {
- for (const ClassSet& class_set : classes_) {
- for (const GcRoot<mirror::Class>& root : class_set) {
- root.Read()->SetClassLoader(class_loader);
- }
- }
-}
-
} // namespace art
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 0b420352c3..5f2eb48d55 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -36,13 +36,6 @@ namespace mirror {
class ClassLoader;
} // namespace mirror
-class ClassVisitor {
- public:
- virtual ~ClassVisitor() {}
- // Return true to continue visiting.
- virtual bool Visit(mirror::Class* klass) = 0;
-};
-
// Each loader has a ClassTable
class ClassTable {
public:
@@ -80,8 +73,9 @@ class ClassTable {
NO_THREAD_SAFETY_ANALYSIS
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
- // Return false if the callback told us to exit.
- bool Visit(ClassVisitor* visitor)
+ // Stops the visit if the visitor returns false.
+ template <typename Visitor>
+ bool Visit(Visitor& visitor)
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
// Return the first class that matches the descriptor. Returns null if there are none.
@@ -118,11 +112,6 @@ class ClassTable {
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Change the class loader of all the contained classes.
- void SetClassLoader(mirror::ClassLoader* class_loader)
- REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
private:
class ClassDescriptorHashEquals {
public:
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a0f875d6b8..904490aa8c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -983,7 +983,7 @@ class ClassListCreator : public ClassVisitor {
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5345b890a1..5c5abeb0a6 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -349,7 +349,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// Check that the class pointer inside the object is not null and is aligned.
// TODO: Method might be not a heap address, and GetClass could fault.
// No read barrier because method_obj may not be a real object.
- mirror::Class* cls = method_obj->GetDeclaringClassNoBarrier();
+ mirror::Class* cls = method_obj->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
if (cls == nullptr) {
VLOG(signals) << "not a class";
return false;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9397c3585a..8e1b7f4d1a 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1081,7 +1081,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
!IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
// Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
// will change it to black or white later in ReferenceQueue::DequeuePendingReference().
- DCHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
+ DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
} else {
// We may occasionally leave a Reference black or white in the queue if its referent happens to
// be concurrently marked after the Scan() call above has enqueued the Reference, in which case
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d76a8d149f..136b793ba1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -317,7 +317,10 @@ Heap::Heap(size_t initial_size,
// Remove already loaded spaces.
for (space::Space* loaded_space : added_image_spaces) {
RemoveSpace(loaded_space);
+ delete loaded_space;
}
+ boot_image_spaces_.clear();
+ requested_alloc_space_begin = nullptr;
break;
}
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 8356814354..e172f85b19 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -93,7 +93,7 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference*
// in the heap causing corruption since this field would get swept.
if (collector_->IsMarkedHeapReference(referent_addr)) {
if (!preserving_references_ ||
- (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
+ (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
return referent_addr->AsMirrorPtr();
}
}
@@ -275,7 +275,7 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference
// GC queues, but since we hold the lock finalizer_reference_queue_ lock it also prevents this
// race.
MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
- if (!reference->IsEnqueued()) {
+ if (reference->IsUnprocessed()) {
CHECK(reference->IsFinalizerReferenceInstance());
reference->SetPendingNext(reference);
return true;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 67dcc2d1a8..03ab9a1a73 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -32,42 +32,37 @@ ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
- if (!ref->IsEnqueued()) {
- EnqueuePendingReference(ref);
+ if (ref->IsUnprocessed()) {
+ EnqueueReference(ref);
}
}
void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
- CHECK(ref->IsEnqueuable());
- EnqueuePendingReference(ref);
-}
-
-void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
DCHECK(ref != nullptr);
+ CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
} else {
mirror::Reference* head = list_->GetPendingNext();
+ DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
+ // Insert the reference between list_ and the old head to preserve the cycle.
list_->SetPendingNext(ref);
}
mirror::Reference* ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* head = list_->GetPendingNext();
- DCHECK(head != nullptr);
- mirror::Reference* ref;
+ mirror::Reference* ref = list_->GetPendingNext();
+ DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
- if (list_ == head) {
- ref = list_;
+ if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = head->GetPendingNext();
+ mirror::Reference* next = ref->GetPendingNext();
list_->SetPendingNext(next);
- ref = head;
}
ref->SetPendingNext(nullptr);
Heap* heap = Runtime::Current()->GetHeap();
@@ -152,9 +147,7 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
} else {
ref->ClearReferent<false>();
}
- if (ref->IsEnqueuable()) {
- cleared_references->EnqueuePendingReference(ref);
- }
+ cleared_references->EnqueueReference(ref);
}
}
}
@@ -167,8 +160,6 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
- // If the referent is non-null the reference must queuable.
- DCHECK(ref->IsEnqueuable());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index aabac97742..04d3454c04 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,27 +44,24 @@ class GarbageCollector;
class Heap;
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
-// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
-// java.lang.ref.Reference objects.
+// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
+// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
+// objects.
class ReferenceQueue {
public:
explicit ReferenceQueue(Mutex* lock);
- // Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
- // since it uses a lock to avoid a race between checking for the references presence and adding
- // it.
+ // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
+ // threads since it uses a lock to avoid a race between checking for the reference's presence and
+ // adding it.
void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
- // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the
- // reference IsEnqueueable. Not thread safe, used when mutators are paused to minimize lock
- // overhead.
+ // Enqueue a reference. The reference must be unprocessed.
+ // Not thread safe, used when mutators are paused to minimize lock overhead.
void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
- // Enqueue a reference without checking that it is enqueable.
- void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Dequeue the first reference (returns list_).
+ // Dequeue a reference from the queue and return that dequeued reference.
mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index dc23afed1d..35bf718875 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -41,19 +41,22 @@ TEST_F(ReferenceQueueTest, EnqueueDequeue) {
ASSERT_TRUE(ref1.Get() != nullptr);
auto ref2(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref2.Get() != nullptr);
- // FIFO ordering.
- queue.EnqueuePendingReference(ref1.Get());
+ queue.EnqueueReference(ref1.Get());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- queue.EnqueuePendingReference(ref2.Get());
+ queue.EnqueueReference(ref2.Get());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 2U);
- ASSERT_EQ(queue.DequeuePendingReference(), ref2.Get());
+
+ std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
+ std::set<mirror::Reference*> dequeued;
+ dequeued.insert(queue.DequeuePendingReference());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- ASSERT_EQ(queue.DequeuePendingReference(), ref1.Get());
+ dequeued.insert(queue.DequeuePendingReference());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
+ ASSERT_EQ(refs, dequeued);
}
TEST_F(ReferenceQueueTest, Dump) {
@@ -75,9 +78,9 @@ TEST_F(ReferenceQueueTest, Dump) {
ASSERT_TRUE(ref1.Get() != nullptr);
auto ref2(hs.NewHandle(finalizer_ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref2.Get() != nullptr);
- queue.EnqueuePendingReference(ref1.Get());
+ queue.EnqueueReference(ref1.Get());
queue.Dump(LOG(INFO));
- queue.EnqueuePendingReference(ref2.Get());
+ queue.EnqueueReference(ref2.Get());
queue.Dump(LOG(INFO));
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 998db5271a..92693395f1 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -867,20 +867,20 @@ class FixupObjectVisitor : public FixupVisitor {
if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
- klass->FixupNativePointers(klass, sizeof(void*), visitor);
+ klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(klass, sizeof(void*), visitor);
// Deal with the arrays.
mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
if (vtable != nullptr) {
- vtable->Fixup(vtable, sizeof(void*), visitor);
+ vtable->Fixup<kVerifyNone, kWithoutReadBarrier>(vtable, sizeof(void*), visitor);
}
mirror::IfTable* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
if (iftable != nullptr) {
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
+ for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
+ if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
mirror::PointerArray* methods =
iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
DCHECK(methods != nullptr);
- methods->Fixup(methods, sizeof(void*), visitor);
+ methods->Fixup<kVerifyNone, kWithoutReadBarrier>(methods, sizeof(void*), visitor);
}
}
}
@@ -925,7 +925,7 @@ class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
if (fixup_heap_objects_) {
method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this));
}
- method->UpdateEntrypoints(ForwardCodeAdapter(this));
+ method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this));
}
private:
@@ -1014,6 +1014,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
// Nothing to fix up.
return true;
}
+ ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
// Need to update the image to be at the target base.
const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
@@ -1039,7 +1040,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
CHECK_EQ(image_header.GetImageBegin(), target_base);
// Fix up dex cache DexFile pointers.
auto* dex_caches = image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)->
- AsObjectArray<mirror::DexCache>();
+ AsObjectArray<mirror::DexCache, kVerifyNone, kWithoutReadBarrier>();
for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
// Fix up dex cache pointers.
@@ -1047,7 +1048,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
if (strings != nullptr) {
GcRoot<mirror::String>* new_strings = fixup_adapter.ForwardObject(strings);
if (strings != new_strings) {
- dex_cache->SetFieldPtr64<false>(mirror::DexCache::StringsOffset(), new_strings);
+ dex_cache->SetStrings(new_strings);
}
dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
}
@@ -1055,7 +1056,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
if (types != nullptr) {
GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types);
if (types != new_types) {
- dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedTypesOffset(), new_types);
+ dex_cache->SetResolvedTypes(new_types);
}
dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
}
@@ -1063,7 +1064,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
if (methods != nullptr) {
ArtMethod** new_methods = fixup_adapter.ForwardObject(methods);
if (methods != new_methods) {
- dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedMethodsOffset(), new_methods);
+ dex_cache->SetResolvedMethods(new_methods);
}
for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
ArtMethod* orig = mirror::DexCache::GetElementPtrSize(new_methods, j, sizeof(void*));
@@ -1077,7 +1078,7 @@ static bool RelocateInPlace(ImageHeader& image_header,
if (fields != nullptr) {
ArtField** new_fields = fixup_adapter.ForwardObject(fields);
if (fields != new_fields) {
- dex_cache->SetFieldPtr64<false>(mirror::DexCache::ResolvedFieldsOffset(), new_fields);
+ dex_cache->SetResolvedFields(new_fields);
}
for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, sizeof(void*));
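Note: every fixup above funnels through the adapter's forwarding step. The idea, sketched in C++ under assumed names (this is not the ART declaration): a pointer that still lands inside the source image is shifted by the relocation delta, anything else is returned untouched.

    #include <cstdint>

    // Hypothetical stand-in for what the fixup adapter's ForwardObject() does.
    template <typename T>
    T* Forward(T* src, uintptr_t source_begin, uintptr_t source_size,
               intptr_t relocation_delta) {
      uintptr_t p = reinterpret_cast<uintptr_t>(src);
      if (p - source_begin < source_size) {  // unsigned wrap also rejects p < source_begin
        return reinterpret_cast<T*>(p + relocation_delta);  // relocate into the new mapping
      }
      return src;  // pointer already outside the source image: leave it alone
    }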
diff --git a/runtime/image.cc b/runtime/image.cc
index de00343451..1f54e3e6ae 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '6', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index c57b1bbf2d..748463529e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -55,7 +55,7 @@ class InstallStubsClassVisitor : public ClassVisitor {
explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
: instrumentation_(instrumentation) {}
- bool Visit(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
instrumentation_->InstallStubsForClass(klass);
return true; // we visit all classes.
}
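Note: renaming Visit() to operator() turns ClassVisitor into an ordinary function object, so visitation sites can invoke any callable-shaped visitor uniformly. A minimal sketch of the resulting shape (illustrative names, not the ART headers):

    #include <cstddef>

    class Klass;  // stand-in for mirror::Class

    struct ClassVisitor {
      virtual ~ClassVisitor() {}
      // Return true to keep visiting, false to stop early.
      virtual bool operator()(Klass* klass) = 0;
    };

    struct CountClasses : ClassVisitor {
      size_t count = 0;
      bool operator()(Klass* /*klass*/) override { ++count; return true; }
    };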
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2559222b68..0b2471b4c0 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -240,7 +240,7 @@ static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs
}
#if !defined(__clang__)
-#if (defined(__arm__) || defined(__i386__))
+#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
// TODO: remove when all targets implemented.
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
#else
@@ -248,7 +248,7 @@ static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKin
#endif
#else
// Clang 3.4 fails to build the goto interpreter implementation.
-#if (defined(__arm__) || defined(__i386__))
+#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__))
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
#else
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
diff --git a/runtime/interpreter/mterp/arm64/alt_stub.S b/runtime/interpreter/mterp/arm64/alt_stub.S
new file mode 100644
index 0000000000..9b8b16d727
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/alt_stub.S
@@ -0,0 +1,12 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (${opnum} * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
diff --git a/runtime/interpreter/mterp/arm64/bincmp.S b/runtime/interpreter/mterp/arm64/bincmp.S
new file mode 100644
index 0000000000..ecab2ceba7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/bincmp.S
@@ -0,0 +1,37 @@
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g.
+ * for "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ mov${condition} w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, ${condition} // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
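Note: the live (MTERP_SUSPEND == 0) path above is branch-free up to the dispatch: csel picks between the fetched branch offset and the fall-through width, and only the sign check afterwards can divert to the suspend path. In C terms, a sketch (offsets are in 16-bit code units; the adds doubles them into bytes):

    #include <cstdint>

    // cond stands in for the ${condition} predicate, e.g. <= for if-le.
    int32_t NextOffsetInCodeUnits(int32_t vA, int32_t vB, int16_t branch_offset,
                                  bool (*cond)(int32_t, int32_t)) {
      const int32_t kNotTaken = 2;  // an if-cmp instruction is 2 code units wide
      return cond(vA, vB) ? branch_offset : kNotTaken;  // the csel
    }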
diff --git a/runtime/interpreter/mterp/arm64/binop.S b/runtime/interpreter/mterp/arm64/binop.S
new file mode 100644
index 0000000000..b629b0b37e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binop.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "result":"w0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if $chkzero
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
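Note: what the chkzero guard does and does not cover, in C terms. Only a zero divisor is trapped; INT_MIN / -1 is left to the hardware, since ARM64 sdiv returns INT_MIN for it rather than faulting (at the C level that division would be undefined behavior, which is exactly why the check lives in assembly). ThrowDivideByZero is a hypothetical stand-in for the common_errDivideByZero path:

    #include <cstdint>

    void ThrowDivideByZero();  // stand-in for branching to common_errDivideByZero

    int32_t DivInt(int32_t vBB, int32_t vCC) {
      if (vCC == 0) {
        ThrowDivideByZero();   // the cbz w1, common_errDivideByZero guard
      }
      return vBB / vCC;        // sdiv: INT_MIN / -1 yields INT_MIN, no trap
    }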
diff --git a/runtime/interpreter/mterp/arm64/binop2addr.S b/runtime/interpreter/mterp/arm64/binop2addr.S
new file mode 100644
index 0000000000..a480a7d551
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binop2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"w0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit16.S b/runtime/interpreter/mterp/arm64/binopLit16.S
new file mode 100644
index 0000000000..4f9d205b38
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binopLit16.S
@@ -0,0 +1,28 @@
+%default {"preinstr":"", "result":"w0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the constant CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit8.S b/runtime/interpreter/mterp/arm64/binopLit8.S
new file mode 100644
index 0000000000..326c657652
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binopLit8.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"w0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the constant CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide.S b/runtime/interpreter/mterp/arm64/binopWide.S
new file mode 100644
index 0000000000..9de24f1c22
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binopWide.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "instr":"add x0, x1, x2", "result":"x0", "r1":"x1", "r2":"x2", "chkzero":"0"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC ($r2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE $r2, w2 // w2<- vCC
+ GET_VREG_WIDE $r1, w1 // w1<- vBB
+ .if $chkzero
+ cbz $r2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr
+ $instr // $result<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE $result, w4 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide2addr.S b/runtime/interpreter/mterp/arm64/binopWide2addr.S
new file mode 100644
index 0000000000..d9927a2ca8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/binopWide2addr.S
@@ -0,0 +1,29 @@
+%default {"preinstr":"", "instr":"add x0, x0, x1", "r0":"x0", "r1":"x1", "chkzero":"0"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB ($r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE $r1, w1 // x1<- vB
+ GET_VREG_WIDE $r0, w2 // x0<- vA
+ .if $chkzero
+ cbz $r1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $preinstr
+ $instr // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE $r0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
new file mode 100644
index 0000000000..f9073ab5d9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/entry.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ .text
+
+/*
+ * Interpreter entry point.
+ * On entry:
+ * x0 Thread* self
+ * x1 code_item
+ * x2 ShadowFrame
+ * x3 JValue* result_register
+ *
+ */
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+ .balign 16
+
+ExecuteMterpImpl:
+ .cfi_startproc
+ stp xIBASE, xREFS, [sp, #-64]!
+ stp xSELF, xINST, [sp, #16]
+ stp xPC, xFP, [sp, #32]
+ stp fp, lr, [sp, #48]
+ add fp, sp, #48
+
+ /* Remember the return register */
+ str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str x1, [x2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov xSELF, x0
+ ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+ add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs[] in the shadow frame.
+ add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
+ ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
+ add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
+ add xPC, xPC, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST // load wINST from rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+ /* NOTE: no fallthrough */
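Note: the pointer setup in the prologue above, restated in C++ (offset names follow the asm_support constants; a sketch, not runtime code). xFP lands on the vregs array, xREFS sits one vreg-array-length above it, and xPC indexes the 16-bit insns[] array by the starting dex pc:

    #include <cstddef>
    #include <cstdint>

    // Sketch of the xPC computation: dex_pc indexes 16-bit code units.
    const uint16_t* InitialDexPc(const char* code_item,
                                 size_t insns_offset,  // CODEITEM_INSNS_OFFSET
                                 uint32_t dex_pc) {
      const uint16_t* insns =
          reinterpret_cast<const uint16_t*>(code_item + insns_offset);
      return insns + dex_pc;  // add xPC, xPC, w0, lsl #1
    }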
diff --git a/runtime/interpreter/mterp/arm64/fallback.S b/runtime/interpreter/mterp/arm64/fallback.S
new file mode 100644
index 0000000000..44e7e1220d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/fallback.S
@@ -0,0 +1,3 @@
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
diff --git a/runtime/interpreter/mterp/arm64/fbinop.S b/runtime/interpreter/mterp/arm64/fbinop.S
new file mode 100644
index 0000000000..926d0783da
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/fbinop.S
@@ -0,0 +1,19 @@
+%default {}
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ $instr // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fbinop2addr.S b/runtime/interpreter/mterp/arm64/fbinop2addr.S
new file mode 100644
index 0000000000..0d57cbf2cf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/fbinop2addr.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ $instr // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fcmp.S b/runtime/interpreter/mterp/arm64/fcmp.S
new file mode 100644
index 0000000000..a45e789f68
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/fcmp.S
@@ -0,0 +1,20 @@
+%default {"wide":"", "r1":"s1", "r2":"s2", "default_val":"-1","cond":"le"}
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG$wide $r1, w2
+ GET_VREG$wide $r2, w3
+ mov w0, #$default_val
+ fcmp $r1, $r2
+ csneg w0, w0, w0, $cond
+ csel w0, wzr, w0, eq
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
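Note: the fcmp/csneg/csel triple computes the Dalvik -1/0/+1 result without branches. For the cmpl-float flavor (default_val = -1, cond = le), the logic reads as below in C; the subtle point is that the ARM "le" condition is also true after an unordered compare, so NaN inherits the -1 bias. A sketch:

    #include <cstdint>

    int32_t CmplFloat(float a, float b) {
      int32_t w0 = -1;                             // mov w0, #default_val
      bool le = (a <= b) || (a != a) || (b != b);  // fcmp: "le" also holds if unordered
      if (!le) {
        w0 = -w0;                                  // csneg w0, w0, w0, le
      }
      if (a == b) {
        w0 = 0;                                    // csel w0, wzr, w0, eq
      }
      return w0;  // -1 if a < b or NaN, 0 if equal, +1 if a > b
    }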
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
new file mode 100644
index 0000000000..b360539a8c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/footer.S
@@ -0,0 +1,170 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ cbz x0, MterpFallback // If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException // (self, shadow_frame)
+ cbz w0, MterpExceptionReturn // no local catch, back to caller.
+ ldr x0, [xFP, #OFF_FP_CODE_ITEM]
+ ldr w1, [xFP, #OFF_FP_DEX_PC]
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add xPC, x0, #CODEITEM_INSNS_OFFSET
+ add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
+ str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for a suspend request. Assumes wINST is already loaded, xPC has been
+ * advanced but the opcode still needs to be fetched and dispatched, and that
+ * the thread flags are in w7.
+ */
+MterpCheckSuspendAndContinue:
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne check1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+check1:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov x0, #0 // signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* xFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov x0, #1 // signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
+ str x0, [x2]
+ mov x0, xSELF
+ ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.eq check2
+ bl MterpSuspendCheck // (self)
+check2:
+ mov x0, #1 // signal return to caller.
+MterpDone:
+ ldp fp, lr, [sp, #48]
+ ldp xPC, xFP, [sp, #32]
+ ldp xSELF, xINST, [sp, #16]
+ ldp xIBASE, xREFS, [sp], #64
+ ret
+
+ .cfi_endproc
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
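Note: both MterpCheckSuspendAndContinue and MterpReturn boil down to the same flag test before calling out. A C++ sketch (flag names follow the asm_support constants; MterpSuspendCheck is the helper the assembly actually calls):

    #include <cstdint>

    struct Thread;
    extern "C" void MterpSuspendCheck(Thread* self);  // the bl target above

    void CheckSuspend(Thread* self, uint32_t thread_flags,
                      uint32_t suspend_request,       // THREAD_SUSPEND_REQUEST
                      uint32_t checkpoint_request) {  // THREAD_CHECKPOINT_REQUEST
      if ((thread_flags & (suspend_request | checkpoint_request)) != 0) {
        MterpSuspendCheck(self);  // may block until the thread is resumed
      }
    }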
diff --git a/runtime/interpreter/mterp/arm64/funopNarrow.S b/runtime/interpreter/mterp/arm64/funopNarrow.S
new file mode 100644
index 0000000000..9f5ad1e87a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/funopNarrow.S
@@ -0,0 +1,18 @@
+%default {"srcreg":"s0", "tgtreg":"d0"}
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: int-to-float, float-to-int
+ * TODO: refactor all of the conversions - parameterize width and use same template.
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopNarrower.S b/runtime/interpreter/mterp/arm64/funopNarrower.S
new file mode 100644
index 0000000000..411396b290
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/funopNarrower.S
@@ -0,0 +1,17 @@
+%default {"srcreg":"s0", "tgtreg":"d0"}
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: double-to-int, double-to-float, long-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWide.S b/runtime/interpreter/mterp/arm64/funopWide.S
new file mode 100644
index 0000000000..d83b39c251
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/funopWide.S
@@ -0,0 +1,17 @@
+%default {"srcreg":"s0", "tgtreg":"d0"}
+ /*
+ * Generic 64bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWider.S b/runtime/interpreter/mterp/arm64/funopWider.S
new file mode 100644
index 0000000000..50a73f1997
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/funopWider.S
@@ -0,0 +1,17 @@
+%default {"srcreg":"s0", "tgtreg":"d0"}
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: int-to-double, float-to-double, float-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
new file mode 100644
index 0000000000..351a6075cb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via xFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM64 Runtime register usage conventions.
+
+ r0 : w0 is 32-bit return register and x0 is 64-bit.
+ r0-r7 : Argument registers.
+ r8-r15 : Caller save registers (used as temporary registers).
+ r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
+ the linker, by the trampolines and other stubs (the backend uses
+ these as temporary registers).
+ r18 : Caller save register (used as temporary register).
+ r19 : Pointer to thread-local storage.
+ r20-r29: Callee save registers.
+ r30 : (lr) is reserved (the link register).
+ rsp : (sp) is reserved (the stack pointer).
+ rzr : (zr) is reserved (the zero register).
+
+ Floating-point registers
+ v0-v31
+
+ v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
+ This is analogous to the C/C++ (hard-float) calling convention.
+ v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
+ Also used as temporary and codegen scratch registers.
+
+ v0-v7 and v16-v31 : trashed across C calls.
+ v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
+
+ v16-v31: Used as codegen temp/scratch.
+ v8-v15 : Can be used for promotion.
+
+ Must maintain 16-byte stack alignment.
+
+Mterp notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ x20 xPC interpreted program counter, used for fetching instructions
+ x21 xFP interpreted frame pointer, used for accessing locals and args
+ x22 xSELF self (Thread) pointer
+ x23 xINST first 16-bit code unit of current instruction
+ x24 xIBASE interpreted instruction base pointer, used for computed goto
+ x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+ x16 ip scratch reg
+ x17 ip2 scratch reg (used by macros)
+
+Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
+codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of xFP */
+/* single-purpose registers, given names for clarity */
+#define xPC x20
+#define xFP x21
+#define xSELF x22
+#define xINST x23
+#define wINST w23
+#define xIBASE x24
+#define xREFS x25
+#define ip x16
+#define ip2 x17
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/*
+ *
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+/*
+ * Fetch the next instruction from xPC into wINST. Does not advance xPC.
+ */
+.macro FETCH_INST
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances xPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh wINST, [xPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to xPC and xINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
+ * xINST ahead of possible exception point. Be sure to manually advance xPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh wINST, [xPC, #((\count)*2)]
+.endm
+
+/* Advance xPC by some number of code units. */
+.macro ADVANCE count
+ add xPC, xPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg, advancing
+ * xPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
+ *
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ add xPC, xPC, \reg, sxtw
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance xPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [xPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [xPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [xPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, xINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Clobbers _reg.
+ */
+
+.macro GOTO_OPCODE reg
+ add \reg, xIBASE, \reg, lsl #${handler_size_bits}
+ br \reg
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add \reg, \base, \reg, lsl #${handler_size_bits}
+ br \reg
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [xFP, \vreg, uxtw #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str wzr, [xREFS, \vreg, uxtw #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str \reg, [xREFS, \vreg, uxtw #2]
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * TUNING: can we do better here?
+ */
+.macro GET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ ldr \reg, [ip2]
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ str \reg, [ip2]
+ add ip2, xREFS, \vreg, lsl #2
+ str xzr, [ip2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
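Note: the OFF_FP() family works because xFP is parked at the vregs array rather than at the ShadowFrame itself, so every other field sits at a fixed negative offset from xFP. A C++ model of the trick (field layout illustrative only, not the real class):

    #include <cstddef>
    #include <cstdint>

    struct ShadowFrame {           // layout illustrative only
      uint32_t number_of_vregs_;
      uint32_t dex_pc_;
      uint32_t vregs_[1];          // xFP points here
    };

    // OFF_FP(a) == a - SHADOWFRAME_VREGS_OFFSET, so e.g. dex_pc_ is reachable
    // from the vregs pointer alone:
    uint32_t LoadDexPc(const uint32_t* xfp) {
      const ptrdiff_t off_fp_dex_pc =
          static_cast<ptrdiff_t>(offsetof(ShadowFrame, dex_pc_)) -
          static_cast<ptrdiff_t>(offsetof(ShadowFrame, vregs_));
      return *reinterpret_cast<const uint32_t*>(
          reinterpret_cast<const char*>(xfp) + off_fp_dex_pc);
    }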
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
new file mode 100644
index 0000000000..ff1974c51d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern $helper
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl $helper
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
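Note: the register moves above imply the following helper shape. The name below is hypothetical (the real substitutions for $helper are the MterpInvoke* handlers), but the signature is inferred directly from the x0-x3 setup and the cbz on the return value:

    #include <cstdint>

    struct Thread;
    struct ShadowFrame;

    // Assumed shape: returns false when an exception is pending (-> MterpException).
    extern "C" bool MterpInvokeHelper(Thread* self, ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr, uint16_t inst_data);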
diff --git a/runtime/interpreter/mterp/arm64/op_add_double.S b/runtime/interpreter/mterp/arm64/op_add_double.S
new file mode 100644
index 0000000000..8509f70309
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_double.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"fadd d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_double_2addr.S b/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
new file mode 100644
index 0000000000..61fd58f4b6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"fadd d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float.S b/runtime/interpreter/mterp/arm64/op_add_float.S
new file mode 100644
index 0000000000..7d09fef10a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_float.S
@@ -0,0 +1 @@
+%include "arm64/fbinop.S" {"instr":"fadd s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float_2addr.S b/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
new file mode 100644
index 0000000000..7b378e2889
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "arm64/fbinop2addr.S" {"instr":"fadd s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int.S b/runtime/interpreter/mterp/arm64/op_add_int.S
new file mode 100644
index 0000000000..6eadb5441d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_2addr.S b/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
new file mode 100644
index 0000000000..d35bc8ecc9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit16.S b/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
new file mode 100644
index 0000000000..4930ad7716
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
new file mode 100644
index 0000000000..196ea9934d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long.S b/runtime/interpreter/mterp/arm64/op_add_long.S
new file mode 100644
index 0000000000..bc334aa3b2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"add x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long_2addr.S b/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
new file mode 100644
index 0000000000..5e5dbce73b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"add x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aget.S b/runtime/interpreter/mterp/arm64/op_aget.S
new file mode 100644
index 0000000000..662c9cc7cc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget.S
@@ -0,0 +1,28 @@
+%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #$shift // x0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $load w2, [x0, #$data_offset] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
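Note: the single unsigned compare above handles both failure modes at once, since a negative index reinterpreted as unsigned exceeds any legal array length. In C terms (ThrowArrayIndex is a hypothetical stand-in for common_errArrayIndex):

    #include <cstdint>

    void ThrowArrayIndex();  // stand-in for branching to common_errArrayIndex

    int32_t AGetInt(const int32_t* data, uint32_t length, int32_t index) {
      if (static_cast<uint32_t>(index) >= length) {  // cmp w1, w3 / bcs
        ThrowArrayIndex();                           // catches index < 0 too
      }
      return data[index];                            // $load w2, [x0, #$data_offset]
    }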
diff --git a/runtime/interpreter/mterp/arm64/op_aget_boolean.S b/runtime/interpreter/mterp/arm64/op_aget_boolean.S
new file mode 100644
index 0000000000..6ab6cc1bff
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_byte.S b/runtime/interpreter/mterp/arm64/op_aget_byte.S
new file mode 100644
index 0000000000..c7f5b23ebf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_char.S b/runtime/interpreter/mterp/arm64/op_aget_char.S
new file mode 100644
index 0000000000..9fddf1787a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_char.S
@@ -0,0 +1 @@
+%include "arm64/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_object.S b/runtime/interpreter/mterp/arm64/op_aget_object.S
new file mode 100644
index 0000000000..1bbe3e8a3a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_object.S
@@ -0,0 +1,20 @@
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ FETCH_B w3, 1, 1 // w3<- CC
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ bl artAGetObjectFromMterp // (array, index)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ PREFETCH_INST 2
+ cbnz w1, MterpException
+ SET_VREG_OBJECT w0, w2
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aget_short.S b/runtime/interpreter/mterp/arm64/op_aget_short.S
new file mode 100644
index 0000000000..39554de6e6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_short.S
@@ -0,0 +1 @@
+%include "arm64/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_wide.S b/runtime/interpreter/mterp/arm64/op_aget_wide.S
new file mode 100644
index 0000000000..6f990ba0cc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aget_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null array object
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #3 // x0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x2, w4
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_and_int.S b/runtime/interpreter/mterp/arm64/op_and_int.S
new file mode 100644
index 0000000000..31f3f73e7a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_2addr.S b/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
new file mode 100644
index 0000000000..e59632cd06
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit16.S b/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
new file mode 100644
index 0000000000..6540f81554
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
new file mode 100644
index 0000000000..167b40ecfb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long.S b/runtime/interpreter/mterp/arm64/op_and_long.S
new file mode 100644
index 0000000000..ede047d088
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"and x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long_2addr.S b/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
new file mode 100644
index 0000000000..d62ccef891
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"and x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aput.S b/runtime/interpreter/mterp/arm64/op_aput.S
new file mode 100644
index 0000000000..175b483d7d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput.S
@@ -0,0 +1,28 @@
+%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #$shift // x0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ $store w2, [x0, #$data_offset] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_boolean.S b/runtime/interpreter/mterp/arm64/op_aput_boolean.S
new file mode 100644
index 0000000000..5e7a86f15c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_byte.S b/runtime/interpreter/mterp/arm64/op_aput_byte.S
new file mode 100644
index 0000000000..d659ebc3d0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_char.S b/runtime/interpreter/mterp/arm64/op_aput_char.S
new file mode 100644
index 0000000000..7547c80870
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_char.S
@@ -0,0 +1 @@
+%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_object.S b/runtime/interpreter/mterp/arm64/op_aput_object.S
new file mode 100644
index 0000000000..0146fdc95c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_object.S
@@ -0,0 +1,13 @@
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpAputObject
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_short.S b/runtime/interpreter/mterp/arm64/op_aput_short.S
new file mode 100644
index 0000000000..8631e28070
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_short.S
@@ -0,0 +1 @@
+%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_wide.S b/runtime/interpreter/mterp/arm64/op_aput_wide.S
new file mode 100644
index 0000000000..e1cf9c1c2f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_aput_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+    add x0, x0, w1, uxtw #3 // x0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ GET_VREG_WIDE x1, w4
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_array_length.S b/runtime/interpreter/mterp/arm64/op_array_length.S
new file mode 100644
index 0000000000..0cce917ff7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_array_length.S
@@ -0,0 +1,12 @@
+ /*
+ * Return the length of an array.
+ */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w0, w1 // w0<- vB (object ref)
+ cbz w0, common_errNullObject // yup, fail
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w3, w2 // vB<- length
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_check_cast.S b/runtime/interpreter/mterp/arm64/op_check_cast.S
new file mode 100644
index 0000000000..cb9f6068e0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_check_cast.S
@@ -0,0 +1,16 @@
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+    VREG_INDEX_TO_ADDR x1, w1 // x1<- &object
+    ldr x2, [xFP, #OFF_FP_METHOD] // x2<- method
+    mov x3, xSELF // x3<- self
+ bl MterpCheckCast // (index, &obj, method, self)
+ PREFETCH_INST 2
+ cbnz w0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_cmp_long.S b/runtime/interpreter/mterp/arm64/op_cmp_long.S
new file mode 100644
index 0000000000..982e5b161d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_cmp_long.S
@@ -0,0 +1,13 @@
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG_WIDE x1, w2
+ GET_VREG_WIDE x2, w3
+ cmp x1, x2
+ csinc w0, wzr, wzr, eq
+ csneg w0, w0, w0, ge
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG w0, w4
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
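
The two conditional-select instructions compute the Dalvik -1/0/1 result without branches: csinc produces 0 when the operands are equal and 1 otherwise, and csneg then negates that value when x1 < x2. The same computation in C (a sketch, not runtime code):

    #include <stdint.h>

    /* cmp-long: -1 if a < b, 0 if a == b, 1 if a > b. */
    static int32_t cmp_long(int64_t a, int64_t b) {
        int32_t r = (a == b) ? 0 : 1;      /* csinc w0, wzr, wzr, eq */
        return (a >= b) ? r : -r;          /* csneg w0, w0, w0, ge */
    }
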
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_double.S b/runtime/interpreter/mterp/arm64/op_cmpg_double.S
new file mode 100644
index 0000000000..14f9ff8a05
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_cmpg_double.S
@@ -0,0 +1 @@
+%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "default_val":"1", "cond":"pl"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_float.S b/runtime/interpreter/mterp/arm64/op_cmpg_float.S
new file mode 100644
index 0000000000..3a20cbae1e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_cmpg_float.S
@@ -0,0 +1 @@
+%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "default_val":"1", "cond":"pl"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_double.S b/runtime/interpreter/mterp/arm64/op_cmpl_double.S
new file mode 100644
index 0000000000..06d59179e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_cmpl_double.S
@@ -0,0 +1 @@
+%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "default_val":"-1", "cond":"le"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_float.S b/runtime/interpreter/mterp/arm64/op_cmpl_float.S
new file mode 100644
index 0000000000..d87d086259
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_cmpl_float.S
@@ -0,0 +1 @@
+%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "default_val":"-1", "cond":"le"}
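
These four one-liners differ only in operand width and in the two template parameters: default_val picks the value produced for an unordered comparison (NaN biases cmpg to 1 and cmpl to -1, per the Dalvik spec), and cond selects the flag test fcmp.S uses to overwrite it. A C sketch of the shared semantics (names are illustrative):

    #include <stdint.h>
    #include <math.h>

    /* cmpg-float / cmpl-float differ only in the NaN result ("bias"). */
    static int32_t cmp_float_biased(float a, float b, int32_t nan_bias) {
        if (isnan(a) || isnan(b)) return nan_bias;  /* "default_val" */
        if (a < b) return -1;
        if (a > b) return 1;
        return 0;
    }
    /* cmpg-float -> cmp_float_biased(a, b, 1);
     * cmpl-float -> cmp_float_biased(a, b, -1). */
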
diff --git a/runtime/interpreter/mterp/arm64/op_const.S b/runtime/interpreter/mterp/arm64/op_const.S
new file mode 100644
index 0000000000..031ede1fb2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const.S
@@ -0,0 +1,9 @@
+ /* const vAA, #+BBBBbbbb */
+ lsr w3, wINST, #8 // w3<- AA
+    FETCH w0, 1 // w0<- bbbb (low)
+    FETCH w1, 2 // w1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_16.S b/runtime/interpreter/mterp/arm64/op_const_16.S
new file mode 100644
index 0000000000..27f527313d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_16.S
@@ -0,0 +1,7 @@
+ /* const/16 vAA, #+BBBB */
+    FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_ADVANCE_INST 2 // advance xPC, load wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_4.S b/runtime/interpreter/mterp/arm64/op_const_4.S
new file mode 100644
index 0000000000..04cd4f81b6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_4.S
@@ -0,0 +1,8 @@
+ /* const/4 vA, #+B */
+ lsl w1, wINST, #16 // w1<- Bxxx0000
+ ubfx w0, wINST, #8, #4 // w0<- A
+ FETCH_ADVANCE_INST 1 // advance xPC, load wINST
+ asr w1, w1, #28 // w1<- sssssssB (sign-extended)
+    GET_INST_OPCODE ip // ip<- opcode from wINST
+ SET_VREG w1, w0 // fp[A]<- w1
+ GOTO_OPCODE ip // execute next instruction
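
The lsl #16 / asr #28 pair extracts the signed 4-bit literal B from bits 15:12 of the instruction word in two instructions. Equivalent C (assuming arithmetic right shift of signed values, as on the targets mterp supports):

    #include <stdint.h>

    /* const/4: pull the signed nibble B out of bits 15:12 of the
     * 16-bit instruction word (zero-extended here into a uint32_t). */
    static int32_t const4_literal(uint32_t inst) {
        return (int32_t)(inst << 16) >> 28;   /* lsl #16 then asr #28 */
    }
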
diff --git a/runtime/interpreter/mterp/arm64/op_const_class.S b/runtime/interpreter/mterp/arm64/op_const_class.S
new file mode 100644
index 0000000000..971cfa08bd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_class.S
@@ -0,0 +1,12 @@
+ /* const/class vAA, Class//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cbnz w0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_high16.S b/runtime/interpreter/mterp/arm64/op_const_high16.S
new file mode 100644
index 0000000000..dd51ce1ebe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_high16.S
@@ -0,0 +1,8 @@
+ /* const/high16 vAA, #+BBBB0000 */
+    FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
+    lsr w3, wINST, #8 // w3<- AA
+    lsl w0, w0, #16 // w0<- BBBB0000
+    FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+    SET_VREG w0, w3 // vAA<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_string.S b/runtime/interpreter/mterp/arm64/op_const_string.S
new file mode 100644
index 0000000000..896f1e7104
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_string.S
@@ -0,0 +1,12 @@
+ /* const/string vAA, String//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
new file mode 100644
index 0000000000..e1a733987d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
@@ -0,0 +1,14 @@
+ /* const/string vAA, String//BBBBBBBB */
+ EXPORT_PC
+    FETCH w0, 1 // w0<- bbbb (low)
+    FETCH w2, 2 // w2<- BBBB (high)
+    lsr w1, wINST, #8 // w1<- AA
+    orr w0, w0, w2, lsl #16 // w0<- BBBBbbbb
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 // advance rPC
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 3 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide.S b/runtime/interpreter/mterp/arm64/op_const_wide.S
new file mode 100644
index 0000000000..8f57ddacfd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_wide.S
@@ -0,0 +1,13 @@
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH w0, 1 // w0<- bbbb (low)
+ FETCH w1, 2 // w1<- BBBB (low middle)
+ FETCH w2, 3 // w2<- hhhh (high middle)
+ FETCH w3, 4 // w3<- HHHH (high)
+    lsr w4, wINST, #8 // w4<- AA
+    FETCH_ADVANCE_INST 5 // advance rPC, load wINST
+    GET_INST_OPCODE ip // extract opcode from wINST
+    orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+    orr x0, x0, x2, lsl #32 // x0<- hhhhBBBBbbbb
+    orr x0, x0, x3, lsl #48 // x0<- HHHHhhhhBBBBbbbb
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
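
The orr chain assembles the 64-bit literal from four little-endian 16-bit code units; each FETCH result has its upper bits clear, so plain orr-with-shift composes them without masking. In C:

    #include <stdint.h>

    /* const-wide: combine the four 16-bit code units, low to high. */
    static int64_t const_wide(uint16_t bbbb, uint16_t BBBB,
                              uint16_t hhhh, uint16_t HHHH) {
        uint64_t v = (uint64_t)bbbb
                   | ((uint64_t)BBBB << 16)
                   | ((uint64_t)hhhh << 32)
                   | ((uint64_t)HHHH << 48);
        return (int64_t)v;
    }
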
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_16.S b/runtime/interpreter/mterp/arm64/op_const_wide_16.S
new file mode 100644
index 0000000000..e43628bccd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_wide_16.S
@@ -0,0 +1,8 @@
+ /* const-wide/16 vAA, #+BBBB */
+    FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
+    lsr w3, wINST, #8 // w3<- AA
+    FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+    sbfm x0, x0, 0, 31 // x0<- sign-extend to 64 bits (sxtw alias)
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_32.S b/runtime/interpreter/mterp/arm64/op_const_wide_32.S
new file mode 100644
index 0000000000..527f7d8167
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_wide_32.S
@@ -0,0 +1,10 @@
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH w0, 1 // w0<- 0000bbbb (low)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_S w2, 2 // w2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ orr w0, w0, w2, lsl #16 // w0<- BBBBbbbb
+    sbfm x0, x0, 0, 31 // x0<- sign-extend BBBBbbbb to 64 bits (sxtw alias)
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_high16.S b/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
new file mode 100644
index 0000000000..94ab9876c8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
@@ -0,0 +1,8 @@
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ lsl x0, x0, #48
+ SET_VREG_WIDE x0, w1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_div_double.S b/runtime/interpreter/mterp/arm64/op_div_double.S
new file mode 100644
index 0000000000..1f7dad0917
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_double.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"fdiv d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_double_2addr.S b/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
new file mode 100644
index 0000000000..414a175658
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"fdiv d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float.S b/runtime/interpreter/mterp/arm64/op_div_float.S
new file mode 100644
index 0000000000..f24a26c09b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_float.S
@@ -0,0 +1 @@
+%include "arm64/fbinop.S" {"instr":"fdiv s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float_2addr.S b/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
new file mode 100644
index 0000000000..2888049c9e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "arm64/fbinop2addr.S" {"instr":"fdiv s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int.S b/runtime/interpreter/mterp/arm64/op_div_int.S
new file mode 100644
index 0000000000..88371c08d9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_2addr.S b/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
new file mode 100644
index 0000000000..5f5a80fe52
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit16.S b/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
new file mode 100644
index 0000000000..dc7a484c6a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit8.S b/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
new file mode 100644
index 0000000000..c06521c4bc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long.S b/runtime/interpreter/mterp/arm64/op_div_long.S
new file mode 100644
index 0000000000..820ae3db68
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"sdiv x0, x1, x2", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long_2addr.S b/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
new file mode 100644
index 0000000000..da7eabdc9d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"sdiv x0, x0, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_float.S b/runtime/interpreter/mterp/arm64/op_double_to_float.S
new file mode 100644
index 0000000000..c1555fdaf9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_double_to_float.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrower.S" {"instr":"fcvt s0, d0", "srcreg":"d0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_int.S b/runtime/interpreter/mterp/arm64/op_double_to_int.S
new file mode 100644
index 0000000000..7244bac2fc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_double_to_int.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrower.S" {"instr":"fcvtzs w0, d0", "srcreg":"d0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_long.S b/runtime/interpreter/mterp/arm64/op_double_to_long.S
new file mode 100644
index 0000000000..741160b564
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_double_to_long.S
@@ -0,0 +1 @@
+%include "arm64/funopWide.S" {"instr":"fcvtzs x0, d0", "srcreg":"d0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_fill_array_data.S b/runtime/interpreter/mterp/arm64/op_fill_array_data.S
new file mode 100644
index 0000000000..f50d9e40ad
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_fill_array_data.S
@@ -0,0 +1,13 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w1, w0, w1, lsl #16 // w1<- BBBBbbbb
+ GET_VREG w0, w3 // w0<- vAA (array object)
+    add x1, xPC, w1, sxtw #1 // x1<- PC + BBBBbbbb*2 (array data off., signed)
+ bl MterpFillArrayData // (obj, payload)
+ cbz w0, MterpPossibleException // exception?
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array.S b/runtime/interpreter/mterp/arm64/op_filled_new_array.S
new file mode 100644
index 0000000000..806a1b1201
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_filled_new_array.S
@@ -0,0 +1,18 @@
+%default { "helper":"MterpFilledNewArray" }
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern $helper
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xSELF
+ bl $helper
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
new file mode 100644
index 0000000000..3c9a419628
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "arm64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_double.S b/runtime/interpreter/mterp/arm64/op_float_to_double.S
new file mode 100644
index 0000000000..892feca21b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_float_to_double.S
@@ -0,0 +1 @@
+%include "arm64/funopWider.S" {"instr":"fcvt d0, s0", "srcreg":"s0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_int.S b/runtime/interpreter/mterp/arm64/op_float_to_int.S
new file mode 100644
index 0000000000..c849d8165d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_float_to_int.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrow.S" {"instr":"fcvtzs w0, s0", "srcreg":"s0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_long.S b/runtime/interpreter/mterp/arm64/op_float_to_long.S
new file mode 100644
index 0000000000..c3de16f9ff
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_float_to_long.S
@@ -0,0 +1 @@
+%include "arm64/funopWider.S" {"instr":"fcvtzs x0, s0", "srcreg":"s0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_goto.S b/runtime/interpreter/mterp/arm64/op_goto.S
new file mode 100644
index 0000000000..db98a45fae
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_goto.S
@@ -0,0 +1,30 @@
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+    lsl w0, wINST, #16 // w0<- AAxx0000
+    asr w1, w0, #24 // w1<- ssssssAA (sign-extended)
+    adds w2, w1, w1 // w2<- byte offset, set flags
+    // If backwards branch refresh xIBASE
+    b.pl 1f // forward branch: skip refresh
+    ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+1:
+    FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+    GET_INST_OPCODE ip // extract opcode from wINST
+    GOTO_OPCODE ip // jump to next instruction
+#else
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET] // Preload flags for MterpCheckSuspendAndContinue
+ lsl w0, wINST, #16 // w0<- AAxx0000
+ asr w0, w0, #24 // w0<- ssssssAA (sign-extended)
+ adds w1, w0, w0 // Convert dalvik offset to byte offset, setting flags
+ FETCH_ADVANCE_INST_RB w1 // load wINST and advance xPC
+    // If backwards branch refresh xIBASE
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
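
Both paths reduce to the same arithmetic: sign-extend the 8-bit code-unit offset from the high byte of the instruction word, then double it to get a byte offset; a negative result marks a backward branch, which is where the suspend check fires. In C:

    #include <stdint.h>

    /* goto +AA: signed 8-bit code-unit offset, doubled to bytes. */
    static int32_t goto_byte_offset(uint16_t inst) {
        int32_t aa = (int8_t)(inst >> 8);  /* lsl #16 / asr #24 pair */
        return aa * 2;                     /* adds w1, w0, w0 */
    }
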
diff --git a/runtime/interpreter/mterp/arm64/op_goto_16.S b/runtime/interpreter/mterp/arm64/op_goto_16.S
new file mode 100644
index 0000000000..ff66a23c4e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_goto_16.S
@@ -0,0 +1,25 @@
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S w0, 1 // w0<- ssssAAAA (sign-extended)
+ adds w1, w0, w0 // w1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load rINST
+    b.pl 1f // forward branch: skip refresh
+    ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+1:
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH_S w0, 1 // w0<- ssssAAAA (sign-extended)
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ adds w1, w0, w0 // w1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load rINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm64/op_goto_32.S b/runtime/interpreter/mterp/arm64/op_goto_32.S
new file mode 100644
index 0000000000..8a6980ecea
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_goto_32.S
@@ -0,0 +1,34 @@
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH w0, 1 // w0<- aaaa (lo)
+ FETCH w1, 2 // w1<- AAAA (hi)
+ orr w0, w0, w1, lsl #16 // w0<- AAAAaaaa
+ adds w1, w0, w0 // w1<- byte offset
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load xINST
+    b.gt 1f // forward branch: skip refresh
+    ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+1:
+ GET_INST_OPCODE ip // extract opcode from xINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH w0, 1 // w0<- aaaa (lo)
+ FETCH w1, 2 // w1<- AAAA (hi)
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ orr w0, w0, w1, lsl #16 // w0<- AAAAaaaa
+ adds w1, w0, w0 // w1<- byte offset
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load xINST
+ b.le MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from xINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
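
The comment above is the key difference from goto and goto/16: an offset of exactly zero (a branch to self) must also reach the suspend check, hence the "<=0" test. As a predicate:

    #include <stdint.h>

    /* goto/32 may branch to itself, so "backward" includes zero. */
    static int needs_suspend_check(int32_t code_unit_offset) {
        return code_unit_offset <= 0;      /* b.le, not b.mi */
    }
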
diff --git a/runtime/interpreter/mterp/arm64/op_if_eq.S b/runtime/interpreter/mterp/arm64/op_if_eq.S
new file mode 100644
index 0000000000..aa4a0f16a7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_eq.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_eqz.S b/runtime/interpreter/mterp/arm64/op_if_eqz.S
new file mode 100644
index 0000000000..1d3202e16c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_eqz.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ge.S b/runtime/interpreter/mterp/arm64/op_if_ge.S
new file mode 100644
index 0000000000..d6ec761bfe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_ge.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gez.S b/runtime/interpreter/mterp/arm64/op_if_gez.S
new file mode 100644
index 0000000000..8e3abd3557
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_gez.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gt.S b/runtime/interpreter/mterp/arm64/op_if_gt.S
new file mode 100644
index 0000000000..7db8e9d911
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_gt.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gtz.S b/runtime/interpreter/mterp/arm64/op_if_gtz.S
new file mode 100644
index 0000000000..a4f2f6b661
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_gtz.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_le.S b/runtime/interpreter/mterp/arm64/op_if_le.S
new file mode 100644
index 0000000000..ca3a83fff7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_le.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lez.S b/runtime/interpreter/mterp/arm64/op_if_lez.S
new file mode 100644
index 0000000000..c1425fddcf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_lez.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lt.S b/runtime/interpreter/mterp/arm64/op_if_lt.S
new file mode 100644
index 0000000000..56450a15ca
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_lt.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ltz.S b/runtime/interpreter/mterp/arm64/op_if_ltz.S
new file mode 100644
index 0000000000..03cd3d6973
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_ltz.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ne.S b/runtime/interpreter/mterp/arm64/op_if_ne.S
new file mode 100644
index 0000000000..14d9e13dcf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_ne.S
@@ -0,0 +1 @@
+%include "arm64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_nez.S b/runtime/interpreter/mterp/arm64/op_if_nez.S
new file mode 100644
index 0000000000..21e1bc2170
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_if_nez.S
@@ -0,0 +1 @@
+%include "arm64/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget.S b/runtime/interpreter/mterp/arm64/op_iget.S
new file mode 100644
index 0000000000..165c7308e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget.S
@@ -0,0 +1,25 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+    ldr x2, [xFP, #OFF_FP_METHOD] // x2<- referrer
+    mov x3, xSELF // x3<- self
+ bl $helper
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if $is_object
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
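
The template's shape, call the resolving getter and then test the thread-local pending-exception slot rather than a return flag, reads more plainly in C. A sketch with simplified stand-in types (the real signature lives in ART's entrypoints):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { void *exception; } Thread;   /* stand-in type */

    /* Stub standing in for artGet32InstanceFromCode, which resolves the
     * field via the referrer and sets self->exception on failure. */
    static int32_t get32(uint32_t field_idx, void *obj, void *referrer,
                         Thread *self) {
        (void)field_idx; (void)obj; (void)referrer; (void)self;
        return 0;
    }

    static int iget(uint32_t field_idx, void *obj, void *referrer,
                    Thread *self, int32_t *vreg_a) {
        int32_t v = get32(field_idx, obj, referrer, self);
        if (self->exception != NULL)
            return 1;                      /* cbnz x3, MterpPossibleException */
        *vreg_a = v;                       /* SET_VREG w0, w2 */
        return 0;
    }
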
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean.S b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
new file mode 100644
index 0000000000..36a9b6beb3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode", "extend":"uxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
new file mode 100644
index 0000000000..2ceccb9ef0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte.S b/runtime/interpreter/mterp/arm64/op_iget_byte.S
new file mode 100644
index 0000000000..fd3f164518
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_iget.S" { "helper":"artGetByteInstanceFromCode", "extend":"sxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
new file mode 100644
index 0000000000..6e97b72183
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char.S b/runtime/interpreter/mterp/arm64/op_iget_char.S
new file mode 100644
index 0000000000..ea23275224
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_char.S
@@ -0,0 +1 @@
+%include "arm64/op_iget.S" { "helper":"artGetCharInstanceFromCode", "extend":"uxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char_quick.S b/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
new file mode 100644
index 0000000000..325dd1cf9e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object.S b/runtime/interpreter/mterp/arm64/op_iget_object.S
new file mode 100644
index 0000000000..03be78d2a1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_object.S
@@ -0,0 +1 @@
+%include "arm64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object_quick.S b/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
new file mode 100644
index 0000000000..e9a797dfe1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
@@ -0,0 +1,15 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- object we're operating on
+ bl artIGetObjectFromMterp // (obj, offset)
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+    cbnz x3, MterpPossibleException // bail out
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_quick.S b/runtime/interpreter/mterp/arm64/op_iget_quick.S
new file mode 100644
index 0000000000..45c68a3a79
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_quick.S
@@ -0,0 +1,15 @@
+%default { "load":"ldr", "extend":"" }
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cmp w3, #0 // check object for null
+ beq common_errNullObject // object was null
+ $load w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $extend
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
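
Quickened variants skip resolution entirely: the verifier has already rewritten the instruction to carry the field's raw byte offset, so the handler is a null check plus one load. A C sketch:

    #include <stdint.h>
    #include <string.h>

    /* iget-quick: read a 32-bit field at a precomputed byte offset.
     * The caller must null-check obj first (cmp/beq in the handler). */
    static int32_t iget_quick(const void *obj, uint32_t byte_offset) {
        int32_t v;
        memcpy(&v, (const char *)obj + byte_offset, sizeof v);  /* $load w0, [x3, x1] */
        return v;
    }
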
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short.S b/runtime/interpreter/mterp/arm64/op_iget_short.S
new file mode 100644
index 0000000000..c347542f03
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_short.S
@@ -0,0 +1 @@
+%include "arm64/op_iget.S" { "helper":"artGetShortInstanceFromCode", "extend":"sxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short_quick.S b/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
new file mode 100644
index 0000000000..83670701c1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide.S b/runtime/interpreter/mterp/arm64/op_iget_wide.S
new file mode 100644
index 0000000000..9718390c3b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_wide.S
@@ -0,0 +1,20 @@
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+    ldr x2, [xFP, #OFF_FP_METHOD] // x2<- referrer
+    mov x3, xSELF // x3<- self
+ bl artGet64InstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+    cbnz x3, MterpException // bail out
+ SET_VREG_WIDE x0, w2
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
new file mode 100644
index 0000000000..2480d2d222
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
@@ -0,0 +1,12 @@
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w4, 1 // w4<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ add x4, x3, x4 // create direct pointer
+ ldr x0, [x4]
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG_WIDE x0, w2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_instance_of.S b/runtime/interpreter/mterp/arm64/op_instance_of.S
new file mode 100644
index 0000000000..647bc75cfd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_instance_of.S
@@ -0,0 +1,23 @@
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- CCCC
+ lsr w1, wINST, #12 // w1<- B
+    VREG_INDEX_TO_ADDR x1, w1 // x1<- &object
+    ldr x2, [xFP, #OFF_FP_METHOD] // x2<- method
+    mov x3, xSELF // x3<- self
+ bl MterpInstanceOf // (index, &obj, method, self)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- A+
+ and w2, w2, #15 // w2<- A
+ PREFETCH_INST 2
+ cbnz x1, MterpException
+ ADVANCE 2 // advance rPC
+ SET_VREG w0, w2 // vA<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_byte.S b/runtime/interpreter/mterp/arm64/op_int_to_byte.S
new file mode 100644
index 0000000000..43f814820a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"instr":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_char.S b/runtime/interpreter/mterp/arm64/op_int_to_char.S
new file mode 100644
index 0000000000..f092170681
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_char.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"instr":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_double.S b/runtime/interpreter/mterp/arm64/op_int_to_double.S
new file mode 100644
index 0000000000..3dee75a141
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_double.S
@@ -0,0 +1 @@
+%include "arm64/funopWider.S" {"instr":"scvtf d0, w0", "srcreg":"w0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_float.S b/runtime/interpreter/mterp/arm64/op_int_to_float.S
new file mode 100644
index 0000000000..3ebbdc7cb9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_float.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrow.S" {"instr":"scvtf s0, w0", "srcreg":"w0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_long.S b/runtime/interpreter/mterp/arm64/op_int_to_long.S
new file mode 100644
index 0000000000..13d2120392
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_long.S
@@ -0,0 +1 @@
+%include "arm64/funopWider.S" {"instr":"sbfm x0, x0, 0, 31", "srcreg":"w0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_short.S b/runtime/interpreter/mterp/arm64/op_int_to_short.S
new file mode 100644
index 0000000000..87fb804668
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_int_to_short.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"instr":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct.S b/runtime/interpreter/mterp/arm64/op_invoke_direct.S
new file mode 100644
index 0000000000..c117232d9c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
new file mode 100644
index 0000000000..efc54c71d9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface.S b/runtime/interpreter/mterp/arm64/op_invoke_interface.S
new file mode 100644
index 0000000000..12dfa592d5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeInterface" }
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
new file mode 100644
index 0000000000..61caaf47e3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static.S b/runtime/interpreter/mterp/arm64/op_invoke_static.S
new file mode 100644
index 0000000000..634eda2736
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_static.S
@@ -0,0 +1,2 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeStatic" }
+
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static_range.S b/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
new file mode 100644
index 0000000000..32cdcddaa4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super.S b/runtime/interpreter/mterp/arm64/op_invoke_super.S
new file mode 100644
index 0000000000..def2c552fd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeSuper" }
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super_range.S b/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
new file mode 100644
index 0000000000..27fb8591a4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
new file mode 100644
index 0000000000..66d050217d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeVirtual" }
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
new file mode 100644
index 0000000000..4300c34646
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
new file mode 100644
index 0000000000..b43955c6d8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000000..90c7b65747
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput.S b/runtime/interpreter/mterp/arm64/op_iput.S
new file mode 100644
index 0000000000..a8c0e61a8f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput.S
@@ -0,0 +1,21 @@
+%default { "is_object":"0", "handler":"artSet32InstanceFromMterp" }
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern $handler
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl $handler
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean.S b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
new file mode 100644
index 0000000000..bbf53192c5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
new file mode 100644
index 0000000000..25c61d7c2e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte.S b/runtime/interpreter/mterp/arm64/op_iput_byte.S
new file mode 100644
index 0000000000..bbf53192c5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
new file mode 100644
index 0000000000..25c61d7c2e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char.S b/runtime/interpreter/mterp/arm64/op_iput_char.S
new file mode 100644
index 0000000000..150d8794ca
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_char.S
@@ -0,0 +1 @@
+%include "arm64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char_quick.S b/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
new file mode 100644
index 0000000000..c6ef46ab87
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object.S b/runtime/interpreter/mterp/arm64/op_iput_object.S
new file mode 100644
index 0000000000..37a649be6b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_object.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ mov x3, xSELF
+ bl MterpIputObject
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object_quick.S b/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
new file mode 100644
index 0000000000..6fbf2b1da3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
@@ -0,0 +1,9 @@
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpIputObjectQuick
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_quick.S b/runtime/interpreter/mterp/arm64/op_iput_quick.S
new file mode 100644
index 0000000000..2afc51beed
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_quick.S
@@ -0,0 +1,13 @@
+%default { "store":"str" }
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz w3, common_errNullObject // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $store w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short.S b/runtime/interpreter/mterp/arm64/op_iput_short.S
new file mode 100644
index 0000000000..150d8794ca
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_short.S
@@ -0,0 +1 @@
+%include "arm64/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short_quick.S b/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
new file mode 100644
index 0000000000..c6ef46ab87
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S
new file mode 100644
index 0000000000..4ce95251f6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide.S
@@ -0,0 +1,15 @@
+ /* iput-wide vA, vB, field//CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    add x2, xFP, x2, lsl #2 // x2<- &fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
new file mode 100644
index 0000000000..27b5dc57b7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
@@ -0,0 +1,13 @@
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w3, 1 // w3<- field byte offset
+ GET_VREG w2, w2 // w2<- fp[B], the object pointer
+ ubfx w0, wINST, #8, #4 // w0<- A
+ cmp w2, #0 // check object for null
+ beq common_errNullObject // object was null
+    GET_VREG_WIDE x0, w0 // x0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ add x1, x2, x3 // create a direct pointer
+ str x0, [x1]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_double.S b/runtime/interpreter/mterp/arm64/op_long_to_double.S
new file mode 100644
index 0000000000..a3f59c2048
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_long_to_double.S
@@ -0,0 +1 @@
+%include "arm64/funopWide.S" {"instr":"scvtf d0, x0", "srcreg":"x0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_float.S b/runtime/interpreter/mterp/arm64/op_long_to_float.S
new file mode 100644
index 0000000000..e9c9145cee
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_long_to_float.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrower.S" {"instr":"scvtf s0, x0", "srcreg":"x0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_int.S b/runtime/interpreter/mterp/arm64/op_long_to_int.S
new file mode 100644
index 0000000000..360a69b908
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_long_to_int.S
@@ -0,0 +1 @@
+%include "arm64/funopNarrower.S" {"instr":"", "srcreg":"x0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_enter.S b/runtime/interpreter/mterp/arm64/op_monitor_enter.S
new file mode 100644
index 0000000000..6fbd9ae725
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_monitor_enter.S
@@ -0,0 +1,13 @@
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+ mov x1, xSELF // w1<- self
+ bl artLockObjectFromCode
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_exit.S b/runtime/interpreter/mterp/arm64/op_monitor_exit.S
new file mode 100644
index 0000000000..26e2d8d7b1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_monitor_exit.S
@@ -0,0 +1,17 @@
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+    mov x1, xSELF // x1<- self
+ bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move.S b/runtime/interpreter/mterp/arm64/op_move.S
new file mode 100644
index 0000000000..195b7eb62d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+    lsr w1, wINST, #12 // w1<- B from 15:12
+    ubfx w0, wINST, #8, #4 // w0<- A from 11:8
+    FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+    GET_VREG w2, w1 // w2<- fp[B]
+    GET_INST_OPCODE ip // ip<- opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w2, w0 // fp[A]<- w2
+    .else
+    SET_VREG w2, w0 // fp[A]<- w2
+ .endif
+ GOTO_OPCODE ip // execute next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_16.S b/runtime/interpreter/mterp/arm64/op_move_16.S
new file mode 100644
index 0000000000..5146e3d6e7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH w1, 2 // w1<- BBBB
+ FETCH w0, 1 // w0<- AAAA
+ FETCH_ADVANCE_INST 3 // advance xPC, load xINST
+ GET_VREG w2, w1 // w2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from xINST
+ .if $is_object
+ SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
+ .else
+ SET_VREG w2, w0 // fp[AAAA]<- w2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_exception.S b/runtime/interpreter/mterp/arm64/op_move_exception.S
new file mode 100644
index 0000000000..b29298fd14
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_exception.S
@@ -0,0 +1,9 @@
+ /* move-exception vAA */
+ lsr w2, wINST, #8 // w2<- AA
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ mov x1, #0 // w1<- 0
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
+ GET_INST_OPCODE ip // extract opcode from rINST
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
+ GOTO_OPCODE ip // jump to next instruction
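
The handler publishes the exception before clearing it, so the object is never dropped: store it into vAA, then zero the thread-local slot. In C (stand-in Thread type):

    #include <stddef.h>

    typedef struct { void *exception; } Thread;   /* stand-in type */

    /* move-exception: copy the pending exception into vAA, then clear
     * the THREAD_EXCEPTION_OFFSET slot, as the ldr/str pair does. */
    static void move_exception(Thread *self, void **vreg_aa) {
        *vreg_aa = self->exception;        /* SET_VREG_OBJECT w3, w2 */
        self->exception = NULL;            /* str x1 (#0) to the slot */
    }
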
diff --git a/runtime/interpreter/mterp/arm64/op_move_from16.S b/runtime/interpreter/mterp/arm64/op_move_from16.S
new file mode 100644
index 0000000000..78f344db6b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+    FETCH w1, 1 // w1<- BBBB
+    lsr w0, wINST, #8 // w0<- AA
+    FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+    GET_VREG w2, w1 // w2<- fp[BBBB]
+    GET_INST_OPCODE ip // extract opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w2, w0 // fp[AA]<- w2
+    .else
+    SET_VREG w2, w0 // fp[AA]<- w2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_object.S b/runtime/interpreter/mterp/arm64/op_move_object.S
new file mode 100644
index 0000000000..a5adc59e81
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_object.S
@@ -0,0 +1 @@
+%include "arm64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_16.S b/runtime/interpreter/mterp/arm64/op_move_object_16.S
new file mode 100644
index 0000000000..ef86c4508b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_object_16.S
@@ -0,0 +1 @@
+%include "arm64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_from16.S b/runtime/interpreter/mterp/arm64/op_move_object_from16.S
new file mode 100644
index 0000000000..0c73b3b045
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "arm64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result.S b/runtime/interpreter/mterp/arm64/op_move_result.S
new file mode 100644
index 0000000000..06fe96269b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+    lsr w2, wINST, #8 // w2<- AA
+    FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+    ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+    ldr w0, [x0] // w0 <- result.i.
+    GET_INST_OPCODE ip // extract opcode from wINST
+    .if $is_object
+    SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- w0
+    .else
+    SET_VREG w0, w2 // fp[AA]<- w0
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
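
OFF_FP_RESULT_REGISTER holds a pointer to a 64-bit result slot, not the result itself, hence the double load. The narrow form reads the low word; move-result-wide below reads all 64 bits. A C sketch (assumes little-endian, as on arm64 here):

    #include <stdint.h>

    /* move-result: dereference the result-register pointer and take
     * the low 32 bits of the 64-bit result slot. */
    static int32_t move_result(const int64_t *result_register) {
        return (int32_t)*result_register;  /* ldr x0, [...]; ldr w0, [x0] */
    }
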
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_object.S b/runtime/interpreter/mterp/arm64/op_move_result_object.S
new file mode 100644
index 0000000000..da2bbee665
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_result_object.S
@@ -0,0 +1 @@
+%include "arm64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_wide.S b/runtime/interpreter/mterp/arm64/op_move_result_wide.S
new file mode 100644
index 0000000000..f90a33f01f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_result_wide.S
@@ -0,0 +1,9 @@
+ /* for: move-result-wide */
+ /* op vAA */
+    lsr w2, wINST, #8 // w2<- AA
+    FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+    ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+    ldr x0, [x0] // x0 <- result.j.
+    GET_INST_OPCODE ip // extract opcode from wINST
+    SET_VREG_WIDE x0, w2 // fp[AA]<- x0
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide.S b/runtime/interpreter/mterp/arm64/op_move_wide.S
new file mode 100644
index 0000000000..538f079736
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_wide.S
@@ -0,0 +1,9 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_16.S b/runtime/interpreter/mterp/arm64/op_move_wide_16.S
new file mode 100644
index 0000000000..c79cdc5007
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_wide_16.S
@@ -0,0 +1,9 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 2 // w3<- BBBB
+ FETCH w2, 1 // w2<- AAAA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ SET_VREG_WIDE x3, w2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_from16.S b/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
new file mode 100644
index 0000000000..70dbe99039
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
@@ -0,0 +1,9 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 1 // w3<- BBBB
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double.S b/runtime/interpreter/mterp/arm64/op_mul_double.S
new file mode 100644
index 0000000000..8d35b81b12
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_double.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"fmul d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
new file mode 100644
index 0000000000..526cb3bccc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"fmul d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float.S b/runtime/interpreter/mterp/arm64/op_mul_float.S
new file mode 100644
index 0000000000..eea7733dbf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_float.S
@@ -0,0 +1 @@
+%include "arm64/fbinop.S" {"instr":"fmul s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
new file mode 100644
index 0000000000..c1f23765aa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "arm64/fbinop2addr.S" {"instr":"fmul s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int.S b/runtime/interpreter/mterp/arm64/op_mul_int.S
new file mode 100644
index 0000000000..d14cae12e4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_int.S
@@ -0,0 +1,2 @@
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%include "arm64/binop.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
new file mode 100644
index 0000000000..f079118172
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
@@ -0,0 +1,2 @@
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%include "arm64/binop2addr.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
new file mode 100644
index 0000000000..a378559541
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
@@ -0,0 +1,2 @@
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%include "arm64/binopLit16.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
new file mode 100644
index 0000000000..b3d40141ae
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
@@ -0,0 +1,2 @@
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+%include "arm64/binopLit8.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long.S b/runtime/interpreter/mterp/arm64/op_mul_long.S
new file mode 100644
index 0000000000..bc0dcbd14b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"mul x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
new file mode 100644
index 0000000000..fa1cdf8a72
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"mul x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_double.S b/runtime/interpreter/mterp/arm64/op_neg_double.S
new file mode 100644
index 0000000000..e9064c47ce
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_neg_double.S
@@ -0,0 +1 @@
+%include "arm64/unopWide.S" {"preinstr":"mov x1, #0x8000000000000000", "instr":"add x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_float.S b/runtime/interpreter/mterp/arm64/op_neg_float.S
new file mode 100644
index 0000000000..49d51afa5b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_neg_float.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"preinstr":"mov w4, #0x80000000", "instr":"add w0, w0, w4"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_int.S b/runtime/interpreter/mterp/arm64/op_neg_int.S
new file mode 100644
index 0000000000..59c14a9087
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_neg_int.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"instr":"sub w0, wzr, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_long.S b/runtime/interpreter/mterp/arm64/op_neg_long.S
new file mode 100644
index 0000000000..0c71ea7de6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_neg_long.S
@@ -0,0 +1 @@
+%include "arm64/unopWide.S" {"instr":"sub x0, xzr, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_new_array.S b/runtime/interpreter/mterp/arm64/op_new_array.S
new file mode 100644
index 0000000000..886120a94e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_new_array.S
@@ -0,0 +1,18 @@
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ mov x3, xSELF
+ bl MterpNewArray
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
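Like most allocation and field paths, this stub only marshals arguments (shadow frame in x0, dex PC in x1, instruction word in w2, Thread* in x3) and calls into C++. A hedged C view of the helper's contract, inferred from the call site rather than quoted from the runtime:

    #include <stdbool.h>
    #include <stdint.h>

    struct ShadowFrame;
    struct Thread;

    /* Assumed shape of the helper: returns false when an exception is
     * pending, which the stub routes to MterpPossibleException via cbz. */
    bool MterpNewArray(struct ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                       uint32_t inst_data, struct Thread* self);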
diff --git a/runtime/interpreter/mterp/arm64/op_new_instance.S b/runtime/interpreter/mterp/arm64/op_new_instance.S
new file mode 100644
index 0000000000..c171ac58f6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_new_instance.S
@@ -0,0 +1,13 @@
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xSELF
+ mov w2, wINST
+ bl MterpNewInstance // (shadow_frame, self, inst_data)
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_nop.S b/runtime/interpreter/mterp/arm64/op_nop.S
new file mode 100644
index 0000000000..80c2d452f8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_nop.S
@@ -0,0 +1,3 @@
+ FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
+ GET_INST_OPCODE ip // ip<- opcode from rINST
+ GOTO_OPCODE ip // execute it
diff --git a/runtime/interpreter/mterp/arm64/op_not_int.S b/runtime/interpreter/mterp/arm64/op_not_int.S
new file mode 100644
index 0000000000..55d77502aa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_not_int.S
@@ -0,0 +1 @@
+%include "arm64/unop.S" {"instr":"mvn w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_not_long.S b/runtime/interpreter/mterp/arm64/op_not_long.S
new file mode 100644
index 0000000000..e5ebdd6e65
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_not_long.S
@@ -0,0 +1 @@
+%include "arm64/unopWide.S" {"instr":"mvn x0, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int.S b/runtime/interpreter/mterp/arm64/op_or_int.S
new file mode 100644
index 0000000000..648c1e6850
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_2addr.S b/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
new file mode 100644
index 0000000000..abdf599fe9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit16.S b/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
new file mode 100644
index 0000000000..db7f4ffb0a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
new file mode 100644
index 0000000000..51675f8b82
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long.S b/runtime/interpreter/mterp/arm64/op_or_long.S
new file mode 100644
index 0000000000..dd137ce85b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"orr x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long_2addr.S b/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
new file mode 100644
index 0000000000..f785230e1c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"orr x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_packed_switch.S b/runtime/interpreter/mterp/arm64/op_packed_switch.S
new file mode 100644
index 0000000000..f087d23c0a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_packed_switch.S
@@ -0,0 +1,39 @@
+%default { "func":"MterpDoPackedSwitch" }
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add x0, xPC, w0, lsl #1 // x0<- PC + BBBBbbbb*2
+ bl $func // w0<- code-unit branch offset
+ adds w1, w0, w0 // w1<- byte offset; set flags
+ b.gt 1f // forward branch: skip refresh
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add x0, xPC, w0, lsl #1 // w0<- PC + BBBBbbbb*2
+ bl $func // w0<- code-unit branch offset
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ adds w1, w0, w0 // w1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ b.le MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
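Both switch flavors defer the table walk to a C helper that returns a branch offset in 16-bit code units. A sketch of what a packed-switch lookup does, based on the dex packed-switch-payload layout (ident 0x0100, size, first_key, then size relative targets); this mirrors the expected behavior of MterpDoPackedSwitch, not its actual source:

    #include <stdint.h>
    #include <string.h>

    /* Returns a branch offset in 16-bit code units; 3 (the size of the
     * switch instruction itself) means fall through. */
    int32_t do_packed_switch(const uint16_t* payload, int32_t test_val) {
        uint16_t size = payload[1];          /* payload[0] is ident 0x0100 */
        int32_t first_key;
        memcpy(&first_key, payload + 2, sizeof(first_key));
        uint32_t index = (uint32_t)(test_val - first_key);
        if (index >= size) {
            return 3;                        /* no match: fall through */
        }
        int32_t target;                      /* targets are 2 code units each */
        memcpy(&target, payload + 4 + 2 * index, sizeof(target));
        return target;
    }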
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double.S b/runtime/interpreter/mterp/arm64/op_rem_double.S
new file mode 100644
index 0000000000..c631ddbfe5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_double.S
@@ -0,0 +1,13 @@
+ /* rem vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d1, w2 // d1<- vCC
+ GET_VREG_WIDE d0, w1 // d0<- vBB
+ bl fmod
+ lsr w4, wINST, #8 // w4<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
new file mode 100644
index 0000000000..db18aa7ee2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
@@ -0,0 +1,12 @@
+ /* rem vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ bl fmod
+ ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float.S b/runtime/interpreter/mterp/arm64/op_rem_float.S
new file mode 100644
index 0000000000..73f7060cf5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_float.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a float remainder function, but libm does */
+%include "arm64/fbinop.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
new file mode 100644
index 0000000000..0b918910c7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
@@ -0,0 +1,13 @@
+ /* rem vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ bl fmodf
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w9
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int.S b/runtime/interpreter/mterp/arm64/op_rem_int.S
new file mode 100644
index 0000000000..dd9dfda088
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"preinstr":"sdiv w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
new file mode 100644
index 0000000000..57fc4971b9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"preinstr":"sdiv w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
new file mode 100644
index 0000000000..b51a739d2e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
new file mode 100644
index 0000000000..03ea32420b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long.S b/runtime/interpreter/mterp/arm64/op_rem_long.S
new file mode 100644
index 0000000000..f133f86a6c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"preinstr":"sdiv x3, x1, x2","instr":"msub x0, x3, x2, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
new file mode 100644
index 0000000000..b45e2a95c1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"preinstr":"sdiv x3, x0, x1", "instr":"msub x0, x3, x1, x0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_return.S b/runtime/interpreter/mterp/arm64/op_return.S
new file mode 100644
index 0000000000..28630eed48
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_return.S
@@ -0,0 +1,19 @@
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
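Every return variant first polls the thread's flag word and, if a suspend or checkpoint is requested, detours through MterpSuspendCheck before unwinding. The shape of that poll in C terms; the flag values below are illustrative only, the real ones come from generated asm-offset constants:

    #include <stdint.h>

    #define THREAD_SUSPEND_REQUEST    1u    /* illustrative values only */
    #define THREAD_CHECKPOINT_REQUEST 2u

    struct Thread;
    void MterpSuspendCheck(struct Thread* self);

    static void return_prologue(struct Thread* self, uint32_t flags) {
        if (flags & (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)) {
            MterpSuspendCheck(self);    /* ands ...; b.ne .L..._check */
        }
    }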
diff --git a/runtime/interpreter/mterp/arm64/op_return_object.S b/runtime/interpreter/mterp/arm64/op_return_object.S
new file mode 100644
index 0000000000..b6cb532b53
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_return_object.S
@@ -0,0 +1 @@
+%include "arm64/op_return.S"
diff --git a/runtime/interpreter/mterp/arm64/op_return_void.S b/runtime/interpreter/mterp/arm64/op_return_void.S
new file mode 100644
index 0000000000..3a5aa56162
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_return_void.S
@@ -0,0 +1,12 @@
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ mov x0, #0
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
new file mode 100644
index 0000000000..1e0695374d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
@@ -0,0 +1,10 @@
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ mov x0, #0
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_wide.S b/runtime/interpreter/mterp/arm64/op_return_wide.S
new file mode 100644
index 0000000000..c6e1d9da80
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_return_wide.S
@@ -0,0 +1,18 @@
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x0, w2 // x0<- vAA
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int.S b/runtime/interpreter/mterp/arm64/op_rsub_int.S
new file mode 100644
index 0000000000..3bf45fe654
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "arm64/binopLit16.S" {"instr":"sub w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
new file mode 100644
index 0000000000..7a3572b364
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"sub w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
new file mode 100644
index 0000000000..6352ce0597
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget.S
@@ -0,0 +1,27 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" }
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern $helper
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl $helper
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ $extend
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if $is_object
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
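The whole sget family instantiates this one template, varying only the C helper and the optional extend line that normalizes sub-word results in w0. Inferred from the call site (field index in w0, referring method in x1, Thread* in x2, exception slot checked afterwards), the helper contract looks roughly like:

    #include <stdint.h>

    struct ArtMethod;
    struct Thread;

    /* Assumed signature; the byte/boolean/char/short variants return
     * narrower values, fixed up by the "extend" substitution
     * (sxtb/uxtb/uxth/sxth). */
    int32_t artGet32StaticFromCode(uint32_t field_idx,
                                   struct ArtMethod* referrer,
                                   struct Thread* self);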
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
new file mode 100644
index 0000000000..c40dbdd7d7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"uxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
new file mode 100644
index 0000000000..6cf69a382a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
new file mode 100644
index 0000000000..8924a349ab
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_char.S
@@ -0,0 +1 @@
+%include "arm64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
new file mode 100644
index 0000000000..620b0bab00
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_object.S
@@ -0,0 +1 @@
+%include "arm64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
new file mode 100644
index 0000000000..19dbba6f74
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_short.S
@@ -0,0 +1 @@
+%include "arm64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
new file mode 100644
index 0000000000..287f66daeb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sget_wide.S
@@ -0,0 +1,19 @@
+ /*
+ * SGET_WIDE handler wrapper.
+ */
+ /* sget-wide vAA, field//BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGet64StaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w4, wINST, #8 // w4<- AA
+ cbnz x3, MterpException // bail out
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG_WIDE x0, w4
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int.S b/runtime/interpreter/mterp/arm64/op_shl_int.S
new file mode 100644
index 0000000000..bd0f237cfe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shl_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"preinstr":"and w1, w1, #31", "instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
new file mode 100644
index 0000000000..b4671d2f1c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"preinstr":"and w1, w1, #31", "instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
new file mode 100644
index 0000000000..4dd32e08a2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"preinstr":"and w1, w1, #31", "instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long.S b/runtime/interpreter/mterp/arm64/op_shl_long.S
new file mode 100644
index 0000000000..bbf9600953
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shl_long.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
new file mode 100644
index 0000000000..a5c4013bf7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide2addr.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int.S b/runtime/interpreter/mterp/arm64/op_shr_int.S
new file mode 100644
index 0000000000..c214a18fd0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shr_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"preinstr":"and w1, w1, #31", "instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
new file mode 100644
index 0000000000..3c1484b0c7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"preinstr":"and w1, w1, #31", "instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
new file mode 100644
index 0000000000..26d5024a2c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"preinstr":"and w1, w1, #31", "instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long.S b/runtime/interpreter/mterp/arm64/op_shr_long.S
new file mode 100644
index 0000000000..4d332359ab
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shr_long.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
new file mode 100644
index 0000000000..0a4a386c95
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide2addr.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_sparse_switch.S b/runtime/interpreter/mterp/arm64/op_sparse_switch.S
new file mode 100644
index 0000000000..5a8d7489bc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "arm64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
new file mode 100644
index 0000000000..75f27abdcc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput.S
@@ -0,0 +1,19 @@
+%default { "helper":"artSet32StaticFromCode"}
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ lsr w3, wINST, #8 // w3<- AA
+ GET_VREG w1, w3 // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl $helper
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
new file mode 100644
index 0000000000..11c55e529b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
new file mode 100644
index 0000000000..11c55e529b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_byte.S
@@ -0,0 +1 @@
+%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
new file mode 100644
index 0000000000..b4dd5aa76c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_char.S
@@ -0,0 +1 @@
+%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_object.S b/runtime/interpreter/mterp/arm64/op_sput_object.S
new file mode 100644
index 0000000000..c176da273f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_object.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xINST
+ mov x3, xSELF
+ bl MterpSputObject
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
new file mode 100644
index 0000000000..b4dd5aa76c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_short.S
@@ -0,0 +1 @@
+%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
new file mode 100644
index 0000000000..1d034ecf2f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S
@@ -0,0 +1,18 @@
+ /*
+ * SPUT_WIDE handler wrapper.
+ */
+ /* sput-wide vAA, field//BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ lsr w2, wINST, #8 // w2<- AA
+ add x2, xFP, w2, lsl #2
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cbnz w0, MterpException // 0 on success, -1 on failure
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double.S b/runtime/interpreter/mterp/arm64/op_sub_double.S
new file mode 100644
index 0000000000..e8e3401e17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_double.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"fsub d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
new file mode 100644
index 0000000000..ddab55e9fe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"fsub d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float.S b/runtime/interpreter/mterp/arm64/op_sub_float.S
new file mode 100644
index 0000000000..227b15fdf3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_float.S
@@ -0,0 +1 @@
+%include "arm64/fbinop.S" {"instr":"fsub s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
new file mode 100644
index 0000000000..19ac8d5616
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "arm64/fbinop2addr.S" {"instr":"fsub s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int.S b/runtime/interpreter/mterp/arm64/op_sub_int.S
new file mode 100644
index 0000000000..0e7ce0e6e5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"sub w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
new file mode 100644
index 0000000000..d2c1bd307a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"sub w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long.S b/runtime/interpreter/mterp/arm64/op_sub_long.S
new file mode 100644
index 0000000000..263c70d6e7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"sub x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
new file mode 100644
index 0000000000..5be3772670
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"sub x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_throw.S b/runtime/interpreter/mterp/arm64/op_throw.S
new file mode 100644
index 0000000000..9a951af302
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_throw.S
@@ -0,0 +1,10 @@
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w1, w2 // w1<- vAA (exception object)
+ cbz w1, common_errNullObject
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3e.S b/runtime/interpreter/mterp/arm64/op_unused_3e.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_3e.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3f.S b/runtime/interpreter/mterp/arm64/op_unused_3f.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_3f.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_40.S b/runtime/interpreter/mterp/arm64/op_unused_40.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_40.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_41.S b/runtime/interpreter/mterp/arm64/op_unused_41.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_41.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_42.S b/runtime/interpreter/mterp/arm64/op_unused_42.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_42.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_43.S b/runtime/interpreter/mterp/arm64/op_unused_43.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_43.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_73.S b/runtime/interpreter/mterp/arm64/op_unused_73.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_73.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_79.S b/runtime/interpreter/mterp/arm64/op_unused_79.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_79.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_7a.S b/runtime/interpreter/mterp/arm64/op_unused_7a.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_7a.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f3.S b/runtime/interpreter/mterp/arm64/op_unused_f3.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f3.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f4.S b/runtime/interpreter/mterp/arm64/op_unused_f4.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f4.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f5.S b/runtime/interpreter/mterp/arm64/op_unused_f5.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f5.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f6.S b/runtime/interpreter/mterp/arm64/op_unused_f6.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f6.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f7.S b/runtime/interpreter/mterp/arm64/op_unused_f7.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f7.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f8.S b/runtime/interpreter/mterp/arm64/op_unused_f8.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f8.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f9.S b/runtime/interpreter/mterp/arm64/op_unused_f9.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_f9.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fa.S b/runtime/interpreter/mterp/arm64/op_unused_fa.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_fa.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fb.S b/runtime/interpreter/mterp/arm64/op_unused_fb.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_fb.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fc.S b/runtime/interpreter/mterp/arm64/op_unused_fc.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_fc.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fd.S b/runtime/interpreter/mterp/arm64/op_unused_fd.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_fd.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fe.S b/runtime/interpreter/mterp/arm64/op_unused_fe.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_fe.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_ff.S b/runtime/interpreter/mterp/arm64/op_unused_ff.S
new file mode 100644
index 0000000000..204eceff7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_unused_ff.S
@@ -0,0 +1 @@
+%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int.S b/runtime/interpreter/mterp/arm64/op_ushr_int.S
new file mode 100644
index 0000000000..bb8382b925
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_ushr_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"preinstr":"and w1, w1, #31", "instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
new file mode 100644
index 0000000000..dbccb9952a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"preinstr":"and w1, w1, #31", "instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
new file mode 100644
index 0000000000..35090c46d7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"preinstr":"and w1, w1, #31", "instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long.S b/runtime/interpreter/mterp/arm64/op_ushr_long.S
new file mode 100644
index 0000000000..e13c86a48b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_ushr_long.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
new file mode 100644
index 0000000000..67ec91e967
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/shiftWide2addr.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int.S b/runtime/interpreter/mterp/arm64/op_xor_int.S
new file mode 100644
index 0000000000..74836635fe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_int.S
@@ -0,0 +1 @@
+%include "arm64/binop.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
new file mode 100644
index 0000000000..2f9a2c7359
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binop2addr.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
new file mode 100644
index 0000000000..6b72c560f9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "arm64/binopLit16.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
new file mode 100644
index 0000000000..6d187b5797
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "arm64/binopLit8.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long.S b/runtime/interpreter/mterp/arm64/op_xor_long.S
new file mode 100644
index 0000000000..3880d5d19f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_long.S
@@ -0,0 +1 @@
+%include "arm64/binopWide.S" {"instr":"eor x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
new file mode 100644
index 0000000000..36905529d5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "arm64/binopWide2addr.S" {"instr":"eor x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/shiftWide.S b/runtime/interpreter/mterp/arm64/shiftWide.S
new file mode 100644
index 0000000000..6306fca5cb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/shiftWide.S
@@ -0,0 +1,20 @@
+%default {"opcode":"shl"}
+ /*
+ * 64-bit shift operation.
+ *
+ * For: shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w3, wINST, #8 // w3<- AA
+ lsr w2, w0, #8 // w2<- CC
+ GET_VREG w2, w2 // w2<- vCC (shift count)
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and x2, x2, #63 // Mask low 6
+ $opcode x0, x1, x2 // Do the shift.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
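The "and x2, x2, #63" mirrors the Java/Dalvik rule that long shifts use only the low six bits of the shift count, which also matches what A64 register-shift instructions do on 64-bit operands. Equivalent C:

    #include <stdint.h>

    /* shl-long: only the low 6 bits of vCC participate. */
    int64_t shl_long(int64_t x, int32_t count) {
        return (int64_t)((uint64_t)x << (count & 63));
    }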
diff --git a/runtime/interpreter/mterp/arm64/shiftWide2addr.S b/runtime/interpreter/mterp/arm64/shiftWide2addr.S
new file mode 100644
index 0000000000..77d104a62b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/shiftWide2addr.S
@@ -0,0 +1,16 @@
+%default {"opcode":"lsl"}
+ /*
+ * Generic 64-bit shift operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w1, w1 // w1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and x1, x1, #63 // Mask low 6 bits.
+ $opcode x0, x0, x1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unop.S b/runtime/interpreter/mterp/arm64/unop.S
new file mode 100644
index 0000000000..474a961837
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/unop.S
@@ -0,0 +1,20 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ $preinstr // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $instr // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unopWide.S b/runtime/interpreter/mterp/arm64/unopWide.S
new file mode 100644
index 0000000000..109302a128
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/unopWide.S
@@ -0,0 +1,18 @@
+%default {"instr":"sub x0, xzr, x0", "preinstr":""}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op x0".
+ *
+ * For: neg-long, not-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $preinstr
+ $instr
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unused.S b/runtime/interpreter/mterp/arm64/unused.S
new file mode 100644
index 0000000000..ffa00becfd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/arm64/zcmp.S b/runtime/interpreter/mterp/arm64/zcmp.S
new file mode 100644
index 0000000000..d4856d2668
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/zcmp.S
@@ -0,0 +1,33 @@
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g.
+ * for "if-lez" you would use "le".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ mov w0, #2 // w0<- inst branch dist for not-taken
+ cmp w2, #0 // compare (vA, 0)
+ csel w1, w1, w0, ${condition} // w1<- branch dist (taken or not)
+ adds w1, w1, w1 // convert to bytes & set flags
+ b.pl 1f // forward branch: skip refresh
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh table base
+1:
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+ cmp w2, #0 // compare (vA, 0)
+ csel w1, w1, w0, ${condition} // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
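Branch offsets here are signed 16-bit code-unit counts: a not-taken compare falls through by 2 units (the size of the if-cmp instruction), the chosen offset is doubled into bytes for the PC update, and a negative (backward) branch takes the suspend-check slow path. The arithmetic, sketched in C for the if-eqz case:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns the byte delta to apply to the dex PC; *backward flags the
     * suspend poll taken on backward branches. if-eqz only; the other
     * zcmp variants differ just in the comparison. */
    int32_t if_eqz_advance(int32_t vAA, int16_t branch_units, bool* backward) {
        int32_t units = (vAA == 0) ? branch_units : 2;   /* csel */
        int32_t bytes = units * 2;                       /* adds w1, w1, w1 */
        *backward = bytes < 0;                           /* b.mi slow path */
        return bytes;
    }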
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index ef3c721df2..57206d25ba 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -1,3 +1,4 @@
+
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,262 +37,262 @@ op-start arm64
# (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
# (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
- op op_nop FALLBACK
- op op_move FALLBACK
- op op_move_from16 FALLBACK
- op op_move_16 FALLBACK
- op op_move_wide FALLBACK
- op op_move_wide_from16 FALLBACK
- op op_move_wide_16 FALLBACK
- op op_move_object FALLBACK
- op op_move_object_from16 FALLBACK
- op op_move_object_16 FALLBACK
- op op_move_result FALLBACK
- op op_move_result_wide FALLBACK
- op op_move_result_object FALLBACK
- op op_move_exception FALLBACK
- op op_return_void FALLBACK
- op op_return FALLBACK
- op op_return_wide FALLBACK
- op op_return_object FALLBACK
- op op_const_4 FALLBACK
- op op_const_16 FALLBACK
- op op_const FALLBACK
- op op_const_high16 FALLBACK
- op op_const_wide_16 FALLBACK
- op op_const_wide_32 FALLBACK
- op op_const_wide FALLBACK
- op op_const_wide_high16 FALLBACK
- op op_const_string FALLBACK
- op op_const_string_jumbo FALLBACK
- op op_const_class FALLBACK
- op op_monitor_enter FALLBACK
- op op_monitor_exit FALLBACK
- op op_check_cast FALLBACK
- op op_instance_of FALLBACK
- op op_array_length FALLBACK
- op op_new_instance FALLBACK
- op op_new_array FALLBACK
- op op_filled_new_array FALLBACK
- op op_filled_new_array_range FALLBACK
- op op_fill_array_data FALLBACK
- op op_throw FALLBACK
- op op_goto FALLBACK
- op op_goto_16 FALLBACK
- op op_goto_32 FALLBACK
- op op_packed_switch FALLBACK
- op op_sparse_switch FALLBACK
- op op_cmpl_float FALLBACK
- op op_cmpg_float FALLBACK
- op op_cmpl_double FALLBACK
- op op_cmpg_double FALLBACK
- op op_cmp_long FALLBACK
- op op_if_eq FALLBACK
- op op_if_ne FALLBACK
- op op_if_lt FALLBACK
- op op_if_ge FALLBACK
- op op_if_gt FALLBACK
- op op_if_le FALLBACK
- op op_if_eqz FALLBACK
- op op_if_nez FALLBACK
- op op_if_ltz FALLBACK
- op op_if_gez FALLBACK
- op op_if_gtz FALLBACK
- op op_if_lez FALLBACK
- op_unused_3e FALLBACK
- op_unused_3f FALLBACK
- op_unused_40 FALLBACK
- op_unused_41 FALLBACK
- op_unused_42 FALLBACK
- op_unused_43 FALLBACK
- op op_aget FALLBACK
- op op_aget_wide FALLBACK
- op op_aget_object FALLBACK
- op op_aget_boolean FALLBACK
- op op_aget_byte FALLBACK
- op op_aget_char FALLBACK
- op op_aget_short FALLBACK
- op op_aput FALLBACK
- op op_aput_wide FALLBACK
- op op_aput_object FALLBACK
- op op_aput_boolean FALLBACK
- op op_aput_byte FALLBACK
- op op_aput_char FALLBACK
- op op_aput_short FALLBACK
- op op_iget FALLBACK
- op op_iget_wide FALLBACK
- op op_iget_object FALLBACK
- op op_iget_boolean FALLBACK
- op op_iget_byte FALLBACK
- op op_iget_char FALLBACK
- op op_iget_short FALLBACK
- op op_iput FALLBACK
- op op_iput_wide FALLBACK
- op op_iput_object FALLBACK
- op op_iput_boolean FALLBACK
- op op_iput_byte FALLBACK
- op op_iput_char FALLBACK
- op op_iput_short FALLBACK
- op op_sget FALLBACK
- op op_sget_wide FALLBACK
- op op_sget_object FALLBACK
- op op_sget_boolean FALLBACK
- op op_sget_byte FALLBACK
- op op_sget_char FALLBACK
- op op_sget_short FALLBACK
- op op_sput FALLBACK
- op op_sput_wide FALLBACK
- op op_sput_object FALLBACK
- op op_sput_boolean FALLBACK
- op op_sput_byte FALLBACK
- op op_sput_char FALLBACK
- op op_sput_short FALLBACK
- op op_invoke_virtual FALLBACK
- op op_invoke_super FALLBACK
- op op_invoke_direct FALLBACK
- op op_invoke_static FALLBACK
- op op_invoke_interface FALLBACK
- op op_return_void_no_barrier FALLBACK
- op op_invoke_virtual_range FALLBACK
- op op_invoke_super_range FALLBACK
- op op_invoke_direct_range FALLBACK
- op op_invoke_static_range FALLBACK
- op op_invoke_interface_range FALLBACK
- op_unused_79 FALLBACK
- op_unused_7a FALLBACK
- op op_neg_int FALLBACK
- op op_not_int FALLBACK
- op op_neg_long FALLBACK
- op op_not_long FALLBACK
- op op_neg_float FALLBACK
- op op_neg_double FALLBACK
- op op_int_to_long FALLBACK
- op op_int_to_float FALLBACK
- op op_int_to_double FALLBACK
- op op_long_to_int FALLBACK
- op op_long_to_float FALLBACK
- op op_long_to_double FALLBACK
- op op_float_to_int FALLBACK
- op op_float_to_long FALLBACK
- op op_float_to_double FALLBACK
- op op_double_to_int FALLBACK
- op op_double_to_long FALLBACK
- op op_double_to_float FALLBACK
- op op_int_to_byte FALLBACK
- op op_int_to_char FALLBACK
- op op_int_to_short FALLBACK
- op op_add_int FALLBACK
- op op_sub_int FALLBACK
- op op_mul_int FALLBACK
- op op_div_int FALLBACK
- op op_rem_int FALLBACK
- op op_and_int FALLBACK
- op op_or_int FALLBACK
- op op_xor_int FALLBACK
- op op_shl_int FALLBACK
- op op_shr_int FALLBACK
- op op_ushr_int FALLBACK
- op op_add_long FALLBACK
- op op_sub_long FALLBACK
- op op_mul_long FALLBACK
- op op_div_long FALLBACK
- op op_rem_long FALLBACK
- op op_and_long FALLBACK
- op op_or_long FALLBACK
- op op_xor_long FALLBACK
- op op_shl_long FALLBACK
- op op_shr_long FALLBACK
- op op_ushr_long FALLBACK
- op op_add_float FALLBACK
- op op_sub_float FALLBACK
- op op_mul_float FALLBACK
- op op_div_float FALLBACK
- op op_rem_float FALLBACK
- op op_add_double FALLBACK
- op op_sub_double FALLBACK
- op op_mul_double FALLBACK
- op op_div_double FALLBACK
- op op_rem_double FALLBACK
- op op_add_int_2addr FALLBACK
- op op_sub_int_2addr FALLBACK
- op op_mul_int_2addr FALLBACK
- op op_div_int_2addr FALLBACK
- op op_rem_int_2addr FALLBACK
- op op_and_int_2addr FALLBACK
- op op_or_int_2addr FALLBACK
- op op_xor_int_2addr FALLBACK
- op op_shl_int_2addr FALLBACK
- op op_shr_int_2addr FALLBACK
- op op_ushr_int_2addr FALLBACK
- op op_add_long_2addr FALLBACK
- op op_sub_long_2addr FALLBACK
- op op_mul_long_2addr FALLBACK
- op op_div_long_2addr FALLBACK
- op op_rem_long_2addr FALLBACK
- op op_and_long_2addr FALLBACK
- op op_or_long_2addr FALLBACK
- op op_xor_long_2addr FALLBACK
- op op_shl_long_2addr FALLBACK
- op op_shr_long_2addr FALLBACK
- op op_ushr_long_2addr FALLBACK
- op op_add_float_2addr FALLBACK
- op op_sub_float_2addr FALLBACK
- op op_mul_float_2addr FALLBACK
- op op_div_float_2addr FALLBACK
- op op_rem_float_2addr FALLBACK
- op op_add_double_2addr FALLBACK
- op op_sub_double_2addr FALLBACK
- op op_mul_double_2addr FALLBACK
- op op_div_double_2addr FALLBACK
- op op_rem_double_2addr FALLBACK
- op op_add_int_lit16 FALLBACK
- op op_rsub_int FALLBACK
- op op_mul_int_lit16 FALLBACK
- op op_div_int_lit16 FALLBACK
- op op_rem_int_lit16 FALLBACK
- op op_and_int_lit16 FALLBACK
- op op_or_int_lit16 FALLBACK
- op op_xor_int_lit16 FALLBACK
- op op_add_int_lit8 FALLBACK
- op op_rsub_int_lit8 FALLBACK
- op op_mul_int_lit8 FALLBACK
- op op_div_int_lit8 FALLBACK
- op op_rem_int_lit8 FALLBACK
- op op_and_int_lit8 FALLBACK
- op op_or_int_lit8 FALLBACK
- op op_xor_int_lit8 FALLBACK
- op op_shl_int_lit8 FALLBACK
- op op_shr_int_lit8 FALLBACK
- op op_ushr_int_lit8 FALLBACK
- op op_iget_quick FALLBACK
- op op_iget_wide_quick FALLBACK
- op op_iget_object_quick FALLBACK
- op op_iput_quick FALLBACK
- op op_iput_wide_quick FALLBACK
- op op_iput_object_quick FALLBACK
- op op_invoke_virtual_quick FALLBACK
- op op_invoke_virtual_range_quick FALLBACK
- op op_iput_boolean_quick FALLBACK
- op op_iput_byte_quick FALLBACK
- op op_iput_char_quick FALLBACK
- op op_iput_short_quick FALLBACK
- op op_iget_boolean_quick FALLBACK
- op op_iget_byte_quick FALLBACK
- op op_iget_char_quick FALLBACK
- op op_iget_short_quick FALLBACK
- op_unused_f3 FALLBACK
- op_unused_f4 FALLBACK
- op_unused_f5 FALLBACK
- op_unused_f6 FALLBACK
- op_unused_f7 FALLBACK
- op_unused_f8 FALLBACK
- op_unused_f9 FALLBACK
- op_unused_fa FALLBACK
- op_unused_fb FALLBACK
- op_unused_fc FALLBACK
- op_unused_fd FALLBACK
- op_unused_fe FALLBACK
- op_unused_ff FALLBACK
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
op-end
# common subroutines for asm
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
new file mode 100644
index 0000000000..e9d28abf8b
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -0,0 +1,11726 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'arm64'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: arm64/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing an ExecuteXXXImpl()-style body (doesn't
+ handle invoke; allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate the shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via xFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM64 Runtime register usage conventions.
+
+ r0 : w0 is 32-bit return register and x0 is 64-bit.
+ r0-r7 : Argument registers.
+ r8-r15 : Caller save registers (used as temporary registers).
+ r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
+ the linker, by the trampolines and other stubs (the backend uses
+ these as temporary registers).
+ r18 : Caller save register (used as temporary register).
+ r19 : Pointer to thread-local storage.
+ r20-r29: Callee save registers.
+ r30 : (lr) is reserved (the link register).
+ rsp : (sp) is reserved (the stack pointer).
+ rzr : (zr) is reserved (the zero register).
+
+ Floating-point registers
+ v0-v31
+
+ v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
+ This is analogous to the C/C++ (hard-float) calling convention.
+ v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
+ Also used as temporary and codegen scratch registers.
+
+ v0-v7 and v16-v31 : trashed across C calls.
+ v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
+
+ v16-v31: Used as codegen temp/scratch.
+ v8-v15 : Can be used for promotion.
+
+ Must maintain 16-byte stack alignment.
+
+Mterp notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ x20 xPC interpreted program counter, used for fetching instructions
+ x21 xFP interpreted frame pointer, used for accessing locals and args
+ x22 xSELF self (Thread) pointer
+ x23 xINST first 16-bit code unit of current instruction
+ x24 xIBASE interpreted instruction base pointer, used for computed goto
+ x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+ x16 ip scratch reg
+ x17 ip2 scratch reg (used by macros)
+
+Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
+codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of xFP */
+/* single-purpose registers, given names for clarity */
+#define xPC x20
+#define xFP x21
+#define xSELF x22
+#define xINST x23
+#define wINST w23
+#define xIBASE x24
+#define xREFS x25
+#define ip x16
+#define ip2 x17
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
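Since xFP points at the vregs array rather than at the ShadowFrame itself, every other
field is reached through a negative offset. A minimal C sketch of the same addressing
trick (the struct layout and field names here are illustrative, not the real
ShadowFrame definition):

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layout; the real offsets come from asm_support.h. */
    struct ShadowFrame {
        uint32_t number_of_vregs;
        const uint16_t* dex_pc_ptr;
        uint32_t vregs[];        /* xFP points here, not at the struct */
    };

    /* Given fp == &frame->vregs[0], recover the enclosing frame with a
     * negative offset: exactly what OFF_FP() computes at assembly time. */
    static inline struct ShadowFrame* FrameFromFp(uint32_t* fp) {
        return (struct ShadowFrame*)((char*)fp - offsetof(struct ShadowFrame, vregs));
    }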
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+.endm
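The exported value is a direct pointer into the mapped dex code, not a code-unit
offset. A sketch of the conversion GetDexPC performs later (the helper name below is
hypothetical):

    #include <stdint.h>

    /* Recover the instruction-offset dex pc from the exported pointer. */
    static inline uint32_t DexPcFromExportedPtr(const uint16_t* exported_pc,
                                                const uint16_t* insns_base) {
        return (uint32_t)(exported_pc - insns_base);
    }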
+
+/*
+ * Fetch the next instruction from xPC into wINST. Does not advance xPC.
+ */
+.macro FETCH_INST
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances xPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh wINST, [xPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to xPC and xINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
+ * xINST ahead of possible exception point. Be sure to manually advance xPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh wINST, [xPC, #((\count)*2)]
+.endm
+
+/* Advance xPC by some number of code units. */
+.macro ADVANCE count
+ add xPC, xPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg, advancing
+ * xPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ add xPC, xPC, \reg, sxtw
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance xPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [xPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [xPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [xPC, #((\count)*2+(\byte))]
+.endm
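The FETCH family reads 16-bit code units relative to xPC, and FETCH_B selects one byte
of a halfword. The same accesses in C, assuming a little-endian uint16_t* cursor
(helper names are illustrative):

    #include <stdint.h>

    static inline uint16_t fetch(const uint16_t* pc, int count) {
        return pc[count];                              /* FETCH reg, count */
    }
    static inline int16_t fetch_s(const uint16_t* pc, int count) {
        return (int16_t)pc[count];                     /* FETCH_S: signed  */
    }
    static inline uint8_t fetch_b(const uint16_t* pc, int count, int byte) {
        return ((const uint8_t*)pc)[count * 2 + byte]; /* FETCH_B: lo/hi byte */
    }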
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, xINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Clobbers reg.
+ */
+.macro GOTO_OPCODE reg
+ add \reg, xIBASE, \reg, lsl #7
+ br \reg
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add \reg, \base, \reg, lsl #7
+ br \reg
+.endm
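GOTO_OPCODE is computed-goto dispatch: every handler below is padded to 128 bytes
(.balign 128), so a handler's address is simply base + opcode * 128 and no table load
is needed. A C sketch of the address arithmetic:

    #include <stdint.h>

    typedef void (*Handler)(void);

    /* ibase plays the role of xIBASE; the fixed 128-byte handler stride
     * turns indexing into a shift (lsl #7). */
    static inline Handler HandlerFor(const char* ibase, uint8_t opcode) {
        return (Handler)(ibase + ((uintptr_t)opcode << 7));
    }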
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [xFP, \vreg, uxtw #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str wzr, [xREFS, \vreg, uxtw #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str \reg, [xREFS, \vreg, uxtw #2]
+.endm
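These macros maintain the double-store invariant from the header notes: values live in
the xFP array and references in the parallel xREFS array. Object writes go to both
arrays; primitive writes must zero the reference slot so the GC never sees a stale
pointer. In C terms (a sketch):

    #include <stdint.h>

    static inline void SetVreg(uint32_t* vregs, uint32_t* refs,
                               uint32_t v, uint32_t value) {
        vregs[v] = value;
        refs[v]  = 0;      /* primitive: clear the ref slot for the GC */
    }
    static inline void SetVregObject(uint32_t* vregs, uint32_t* refs,
                                     uint32_t v, uint32_t ref) {
        vregs[v] = ref;
        refs[v]  = ref;    /* object: both arrays see the reference */
    }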
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * TUNING: can we do better here?
+ */
+.macro GET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ ldr \reg, [ip2]
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ str \reg, [ip2]
+ add ip2, xREFS, \vreg, lsl #2
+ str xzr, [ip2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/* File: arm64/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ .text
+
+/*
+ * Interpreter entry point.
+ * On entry:
+ *  x0  Thread* self
+ * x1 code_item
+ * x2 ShadowFrame
+ * x3 JValue* result_register
+ *
+ */
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+ .balign 16
+
+ExecuteMterpImpl:
+ .cfi_startproc
+ stp xIBASE, xREFS, [sp, #-64]!
+ stp xSELF, xINST, [sp, #16]
+ stp xPC, xFP, [sp, #32]
+ stp fp, lr, [sp, #48]
+ add fp, sp, #48
+
+ /* Remember the return register */
+ str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str x1, [x2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov xSELF, x0
+ ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET     // point to vregs[] (base of the Dalvik registers).
+ add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
+ ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
+ add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
+ add xPC, xPC, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST // load wINST from rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+ /* NOTE: no fallthrough */
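In C terms, the prologue derives the three pointer registers from the ShadowFrame and
code_item arguments (a sketch reusing the hypothetical ShadowFrame layout above;
'insns' stands in for the code item's instruction array, not a real field name):

    #include <stdint.h>

    static void SetupNamedRegs(struct ShadowFrame* frame, const uint16_t* insns,
                               uint32_t dex_pc, uint32_t** fp, uint32_t** refs,
                               const uint16_t** pc) {
        *fp   = frame->vregs;                  /* xFP: base of the vreg array  */
        *refs = *fp + frame->number_of_vregs;  /* xREFS: parallel ref array    */
        *pc   = insns + dex_pc;                /* xPC: direct dex-code pointer */
    }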
+
+
+ .global artMterpAsmInstructionStart
+ .type artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: arm64/op_nop.S */
+ FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
+ GET_INST_OPCODE ip // ip<- opcode from rINST
+ GOTO_OPCODE ip // execute it
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: arm64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ lsr w1, wINST, #12 // x1<- B from 15:12
+ ubfx w0, wINST, #8, #4 // x0<- A from 11:8
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_VREG w2, w1 // x2<- fp[B]
+ GET_INST_OPCODE ip // ip<- opcode from wINST
+ .if 0
+ SET_VREG_OBJECT w2, w0 // fp[A]<- x2
+ .else
+ SET_VREG w2, w0 // fp[A]<- x2
+ .endif
+ GOTO_OPCODE ip // execute next instruction
+
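The lsr/ubfx pair above is the standard 4-bit field decode for vA, vB instructions:
B occupies bits 15:12 and A bits 11:8 of the instruction word. Equivalent C:

    #include <stdint.h>

    /* Decode 'op vA, vB' fields from a 16-bit instruction word. */
    static inline void DecodeAB(uint16_t inst, uint32_t* a, uint32_t* b) {
        *b = inst >> 12;           /* lsr w1, wINST, #12     */
        *a = (inst >> 8) & 0xf;    /* ubfx w0, wINST, #8, #4 */
    }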
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: arm64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH w1, 1 // r1<- BBBB
+ lsr w0, wINST, #8 // r0<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_VREG w2, w1 // r2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if 0
+ SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
+ .else
+ SET_VREG w2, w0 // fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: arm64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH w1, 2 // w1<- BBBB
+ FETCH w0, 1 // w0<- AAAA
+ FETCH_ADVANCE_INST 3 // advance xPC, load xINST
+ GET_VREG w2, w1 // w2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from xINST
+ .if 0
+ SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
+ .else
+ SET_VREG w2, w0 // fp[AAAA]<- w2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: arm64/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: arm64/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 1 // w3<- BBBB
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: arm64/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 2 // w3<- BBBB
+ FETCH w2, 1 // w2<- AAAA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ SET_VREG_WIDE x3, w2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: arm64/op_move_object.S */
+/* File: arm64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ lsr w1, wINST, #12 // x1<- B from 15:12
+ ubfx w0, wINST, #8, #4 // x0<- A from 11:8
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_VREG w2, w1 // x2<- fp[B]
+ GET_INST_OPCODE ip // ip<- opcode from wINST
+ .if 1
+ SET_VREG_OBJECT w2, w0 // fp[A]<- x2
+ .else
+ SET_VREG w2, w0 // fp[A]<- x2
+ .endif
+ GOTO_OPCODE ip // execute next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: arm64/op_move_object_from16.S */
+/* File: arm64/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH w1, 1 // r1<- BBBB
+ lsr w0, wINST, #8 // r0<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_VREG w2, w1 // r2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if 1
+ SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
+ .else
+ SET_VREG w2, w0 // fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: arm64/op_move_object_16.S */
+/* File: arm64/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH w1, 2 // w1<- BBBB
+ FETCH w0, 1 // w0<- AAAA
+ FETCH_ADVANCE_INST 3 // advance xPC, load xINST
+ GET_VREG w2, w1 // w2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from xINST
+ .if 1
+ SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
+ .else
+ SET_VREG w2, w0 // fp[AAAA]<- w2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: arm64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ lsr w2, wINST, #8 // r2<- AA
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+ ldr w0, [x0] // r0 <- result.i.
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if 0
+ SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
+ .else
+ SET_VREG w0, w2 // fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: arm64/op_move_result_wide.S */
+ /* for: move-result-wide */
+ /* op vAA */
+ lsr w2, wINST, #8 // r2<- AA
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+ ldr x0, [x0] // r0 <- result.i.
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, x2 // fp[AA]<- r0
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: arm64/op_move_result_object.S */
+/* File: arm64/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ lsr w2, wINST, #8 // r2<- AA
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+ ldr w0, [x0] // r0 <- result.i.
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if 1
+ SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
+ .else
+ SET_VREG w0, w2 // fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: arm64/op_move_exception.S */
+ /* move-exception vAA */
+ lsr w2, wINST, #8 // w2<- AA
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ mov x1, #0 // w1<- 0
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
+ GET_INST_OPCODE ip // extract opcode from rINST
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: arm64/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .Lop_return_void_check
+.Lop_return_void_return:
+ mov x0, #0
+ b MterpReturn
+.Lop_return_void_check:
+ bl MterpSuspendCheck // (self)
+ b .Lop_return_void_return
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: arm64/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .Lop_return_check
+.Lop_return_return:
+ lsr w2, wINST, #8 // r2<- AA
+ GET_VREG w0, w2 // r0<- vAA
+ b MterpReturn
+.Lop_return_check:
+ bl MterpSuspendCheck // (self)
+ b .Lop_return_return
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: arm64/op_return_wide.S */
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .Lop_return_wide_check
+.Lop_return_wide_return:
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x0, w2 // x0<- vAA
+ b MterpReturn
+.Lop_return_wide_check:
+ bl MterpSuspendCheck // (self)
+ b .Lop_return_wide_return
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: arm64/op_return_object.S */
+/* File: arm64/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .Lop_return_object_check
+.Lop_return_object_return:
+ lsr w2, wINST, #8 // r2<- AA
+ GET_VREG w0, w2 // r0<- vAA
+ b MterpReturn
+.Lop_return_object_check:
+ bl MterpSuspendCheck // (self)
+ b .Lop_return_object_return
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: arm64/op_const_4.S */
+ /* const/4 vA, #+B */
+ lsl w1, wINST, #16 // w1<- Bxxx0000
+ ubfx w0, wINST, #8, #4 // w0<- A
+ FETCH_ADVANCE_INST 1 // advance xPC, load wINST
+ asr w1, w1, #28 // w1<- sssssssB (sign-extended)
+ GET_INST_OPCODE ip // ip<- opcode from xINST
+ SET_VREG w1, w0 // fp[A]<- w1
+ GOTO_OPCODE ip // execute next instruction
+
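The lsl/asr pair sign-extends the 4-bit literal: B (bits 15:12) is shifted to the top
of the word, then arithmetically shifted right by 28. C equivalent (relies on
arithmetic right shift of signed ints, as on all mainstream compilers):

    #include <stdint.h>

    /* const/4: extract the signed 4-bit literal from bits 15:12. */
    static inline int32_t Const4Literal(uint16_t inst) {
        return (int32_t)((uint32_t)inst << 16) >> 28;  /* lsl #16, asr #28 */
    }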
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: arm64/op_const_16.S */
+ /* const/16 vAA, #+BBBB */
+    FETCH_S w0, 1                       // w0<- ssssBBBB (sign-extended)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_ADVANCE_INST 2 // advance xPC, load wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: arm64/op_const.S */
+ /* const vAA, #+BBBBbbbb */
+ lsr w3, wINST, #8 // w3<- AA
+    FETCH w0, 1                         // w0<- bbbb (low)
+    FETCH w1, 2                         // w1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: arm64/op_const_high16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+    FETCH   w0, 1                       // r0<- 0000BBBB (zero-extended)
+ lsr w3, wINST, #8 // r3<- AA
+ lsl w0, w0, #16 // r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ SET_VREG w0, w3 // vAA<- r0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: arm64/op_const_wide_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+    FETCH_S w0, 1                       // w0<- ssssBBBB (sign-extended)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+    sbfm    x0, x0, 0, 31               // sign-extend w0 to 64 bits (sxtw)
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: arm64/op_const_wide_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH w0, 1 // w0<- 0000bbbb (low)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_S w2, 2 // w2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ orr w0, w0, w2, lsl #16 // w0<- BBBBbbbb
+    sbfm    x0, x0, 0, 31               // sign-extend w0 to 64 bits (sxtw)
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: arm64/op_const_wide.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH w0, 1 // w0<- bbbb (low)
+ FETCH w1, 2 // w1<- BBBB (low middle)
+ FETCH w2, 3 // w2<- hhhh (high middle)
+ FETCH w3, 4 // w3<- HHHH (high)
+ lsr w4, wINST, #8 // r4<- AA
+ FETCH_ADVANCE_INST 5 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+    orr     x0, x0, x2, lsl #32         // x0<- hhhhBBBBbbbb
+    orr     x0, x0, x3, lsl #48         // x0<- HHHHhhhhBBBBbbbb
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: arm64/op_const_wide_high16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+    lsl     x0, x0, #48                 // x0<- BBBB000000000000
+ SET_VREG_WIDE x0, w1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: arm64/op_const_string.S */
+ /* const/string vAA, String//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: arm64/op_const_string_jumbo.S */
+ /* const/string vAA, String//BBBBBBBB */
+ EXPORT_PC
+    FETCH w0, 1                         // w0<- bbbb (low)
+    FETCH w2, 2                         // w2<- BBBB (high)
+ lsr w1, wINST, #8 // w1<- AA
+    orr     w0, w0, w2, lsl #16         // w0<- BBBBbbbb
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 3 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: arm64/op_const_class.S */
+ /* const/class vAA, Class//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cbnz w0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: arm64/op_monitor_enter.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+    mov      x1, xSELF                  // x1<- self
+ bl artLockObjectFromCode
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: arm64/op_monitor_exit.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+    mov      x1, xSELF                  // x1<- self
+ bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: arm64/op_check_cast.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+    VREG_INDEX_TO_ADDR x1, w1           // x1<- &object
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
+ mov x3, xSELF // w3<- self
+ bl MterpCheckCast // (index, &obj, method, self)
+ PREFETCH_INST 2
+ cbnz w0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: arm64/op_instance_of.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- CCCC
+ lsr w1, wINST, #12 // w1<- B
+    VREG_INDEX_TO_ADDR x1, w1           // x1<- &object
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
+ mov x3, xSELF // w3<- self
+ bl MterpInstanceOf // (index, &obj, method, self)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- A+
+ and w2, w2, #15 // w2<- A
+ PREFETCH_INST 2
+ cbnz x1, MterpException
+ ADVANCE 2 // advance rPC
+ SET_VREG w0, w2 // vA<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: arm64/op_array_length.S */
+ /*
+ * Return the length of an array.
+ */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w0, w1 // w0<- vB (object ref)
+ cbz w0, common_errNullObject // yup, fail
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
+ GET_INST_OPCODE ip // extract opcode from rINST
+    SET_VREG w3, w2                     // vA<- length
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: arm64/op_new_instance.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xSELF
+ mov w2, wINST
+ bl MterpNewInstance // (shadow_frame, self, inst_data)
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: arm64/op_new_array.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ mov x3, xSELF
+ bl MterpNewArray
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: arm64/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xSELF
+ bl MterpFilledNewArray
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: arm64/op_filled_new_array_range.S */
+/* File: arm64/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xSELF
+ bl MterpFilledNewArrayRange
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: arm64/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w1, w0, w1, lsl #16 // w1<- BBBBbbbb
+ GET_VREG w0, w3 // w0<- vAA (array object)
+ add x1, xPC, w1, lsl #1 // w1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData // (obj, payload)
+ cbz w0, MterpPossibleException // exception?
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: arm64/op_throw.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // r2<- AA
+ GET_VREG w1, w2 // r1<- vAA (exception object)
+ cbz w1, common_errNullObject
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
+ b MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: arm64/op_goto.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+ mov w0, wINST, lsl #16 // w0<- AAxx0000
+ movs w1, w0, asr #24 // w1<- ssssssAA (sign-extended)
+ add w2, w1, w1 // w2<- byte offset, set flags
+ // If backwards branch refresh rIBASE
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET] // Preload flags for MterpCheckSuspendAndContinue
+ lsl w0, wINST, #16 // w0<- AAxx0000
+ asr w0, w0, #24 // w0<- ssssssAA (sign-extended)
+ adds w1, w0, w0 // Convert dalvik offset to byte offset, setting flags
+ FETCH_ADVANCE_INST_RB w1 // load wINST and advance xPC
+ // If backwards branch refresh rIBASE
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
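The adds that doubles the code-unit offset also sets the N flag, so b.mi routes only
backward branches through the suspend check: any loop must contain at least one
backward branch, which lets forward branches skip the check entirely. The policy as a
C sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Suspend checks are needed only on backward branches. */
    static inline bool NeedsSuspendCheck(int32_t code_unit_offset) {
        int32_t byte_offset = code_unit_offset * 2;  /* adds w1, w0, w0 */
        return byte_offset < 0;                      /* b.mi slow path  */
    }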
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: arm64/op_goto_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S w0, 1 // w0<- ssssAAAA (sign-extended)
+ adds w1, w0, w0 // w1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load rINST
+ ldrmi xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH_S w0, 1 // w0<- ssssAAAA (sign-extended)
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ adds w1, w0, w0 // w1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load rINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: arm64/op_goto_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH w0, 1 // w0<- aaaa (lo)
+ FETCH w1, 2 // w1<- AAAA (hi)
+ orr w0, w0, w1, lsl #16 // w0<- AAAAaaaa
+ adds w1, w0, w0 // w1<- byte offset
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load xINST
+ ldrle xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+ GET_INST_OPCODE ip // extract opcode from xINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH w0, 1 // w0<- aaaa (lo)
+ FETCH w1, 2 // w1<- AAAA (hi)
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ orr w0, w0, w1, lsl #16 // w0<- AAAAaaaa
+ adds w1, w0, w0 // w1<- byte offset
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load xINST
+ b.le MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from xINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: arm64/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ mov w3, wINST, lsr #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add w0, rPC, w0, lsl #1 // w0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch // w0<- code-unit branch offset
+ adds w1, w0, w0 // w1<- byte offset; clear V
+ ldrle rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add x0, xPC, w0, lsl #1 // w0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch // w0<- code-unit branch offset
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ adds w1, w0, w0 // w1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ b.le MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: arm64/op_sparse_switch.S */
+/* File: arm64/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ mov w3, wINST, lsr #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add w0, rPC, w0, lsl #1 // w0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch // w0<- code-unit branch offset
+ adds w1, w0, w0 // w1<- byte offset; clear V
+ ldrle rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh handler base
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ FETCH w0, 1 // w0<- bbbb (lo)
+ FETCH w1, 2 // w1<- BBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add x0, xPC, w0, lsl #1 // w0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch // w0<- code-unit branch offset
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ adds w1, w0, w0 // w1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB w1 // update rPC, load wINST
+ b.le MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: arm64/op_cmpl_float.S */
+/* File: arm64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG s1, w2
+ GET_VREG s2, w3
+ mov w0, #-1
+ fcmp s1, s2
+ csneg w0, w0, w0, le
+ csel w0, wzr, w0, eq
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
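The mov/csneg/csel sequence encodes the NaN bias: cmpl starts from -1 and cmpg (the
next handler) from +1, csneg flips the sign for the opposite ordered case, and csel
zeroes the result on equality. The semantics in C:

    /* cmpl-float: unordered (NaN) compares bias toward -1; the cmpg
     * variant returns +1 instead. */
    static int CmplFloat(float a, float b) {
        if (a > b)  return  1;
        if (a == b) return  0;
        return -1;              /* a < b, or either operand is NaN */
    }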
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: arm64/op_cmpg_float.S */
+/* File: arm64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG s1, w2
+ GET_VREG s2, w3
+ mov w0, #1
+ fcmp s1, s2
+ csneg w0, w0, w0, pl
+ csel w0, wzr, w0, eq
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: arm64/op_cmpl_double.S */
+/* File: arm64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG_WIDE d1, w2
+ GET_VREG_WIDE d2, w3
+ mov w0, #-1
+ fcmp d1, d2
+ csneg w0, w0, w0, le
+ csel w0, wzr, w0, eq
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: arm64/op_cmpg_double.S */
+/* File: arm64/fcmp.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG_WIDE d1, w2
+ GET_VREG_WIDE d2, w3
+ mov w0, #1
+ fcmp d1, d2
+ csneg w0, w0, w0, pl
+ csel w0, wzr, w0, eq
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: arm64/op_cmp_long.S */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG_WIDE x1, w2
+ GET_VREG_WIDE x2, w3
+ cmp x1, x2
+ csinc w0, wzr, wzr, eq
+ csneg w0, w0, w0, ge
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG w0, w4
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
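The csinc/csneg pair computes a branch-free signum of the comparison: 0 on equality,
+1 when greater, negated to -1 when ge fails. Equivalent C:

    #include <stdint.h>

    /* cmp-long: -1, 0, or 1, without branches. */
    static inline int32_t CmpLong(int64_t a, int64_t b) {
        return (a > b) - (a < b);
    }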
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: arm64/op_if_eq.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ moveq w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, eq // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
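Each if-cmp handler selects between two precomputed displacements with csel: the
encoded +CCCC offset when the comparison holds, or 2 code units (the width of the if
instruction itself) when it does not. A sketch for if-eq:

    #include <stdint.h>

    /* if-eq vA, vB, +CCCC: advance CCCC code units if taken, else 2. */
    static inline int32_t IfEqDisplacement(int32_t va, int32_t vb, int16_t cccc) {
        return (va == vb) ? cccc : 2;  /* csel w1, w1, w0, eq */
    }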
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: arm64/op_if_ne.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ movne w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, ne // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: arm64/op_if_lt.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ movlt w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, lt // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: arm64/op_if_ge.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ movge w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, ge // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: arm64/op_if_gt.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov w1, wINST, lsr #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ movgt w1, #2 // w1<- BYTE branch dist for not-taken
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ ldrmi rIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh rIBASE
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, gt // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: arm64/op_if_le.S */
+/* File: arm64/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+    lsr     w1, wINST, #12              // w1<- B
+    ubfx    w0, wINST, #8, #4           // w0<- A
+    GET_VREG w3, w1                     // w3<- vB
+    GET_VREG w2, w0                     // w2<- vA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, w3                      // compare (vA, vB)
+    csel    w1, w1, w0, le              // keep offset only if branch taken
+    adds    w2, w1, w1                  // convert to bytes, check sign
+    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh xIBASE
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Offset if branch not taken
+ cmp w2, w3 // compare (vA, vB)
+ csel w1, w1, w0, le // Branch if true
+ adds w2, w1, w1 // convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: arm64/op_if_eqz.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, eq              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, eq // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
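+
+    /*
+     * Note on the one-operand form above: unlike if-cmp vA, vB, +CCCC,
+     * the zero-test opcodes pack a single 8-bit register index AA into
+     * wINST (hence the plain "lsr w0, wINST, #8" with no ubfx) and
+     * compare it against immediate zero; the taken/not-taken offset
+     * selection is otherwise identical to the two-operand handlers.
+     */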
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: arm64/op_if_nez.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, ne              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, ne // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: arm64/op_if_ltz.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, lt              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, lt // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: arm64/op_if_gez.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, ge              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, ge // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: arm64/op_if_gtz.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, gt              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, gt // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: arm64/op_if_lez.S */
+/* File: arm64/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+    lsr     w0, wINST, #8               // w0<- AA
+    GET_VREG w2, w0                     // w2<- vAA
+    FETCH_S w1, 1                       // w1<- branch offset, in code units
+    mov     w0, #2                      // w0<- branch dist for not-taken
+    cmp     w2, #0                      // compare (vAA, 0)
+    csel    w1, w1, w0, le              // keep offset only if branch taken
+    adds    w1, w1, w1                  // convert to bytes & set flags
+    FETCH_ADVANCE_INST_RB w1            // update rPC, load wINST
+    b.pl    1f                          // forward branch: no refresh needed
+    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]   // refresh table base
+1:
+    GET_INST_OPCODE ip                  // extract opcode from wINST
+    GOTO_OPCODE ip                      // jump to next instruction
+#else
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S w1, 1 // w1<- branch offset, in code units
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov w0, #2 // Branch offset if not taken
+    cmp     w2, #0                      // compare (vAA, 0)
+ csel w1, w1, w0, le // Branch if true
+ adds w2, w1, w1 // convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ b.mi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: arm64/op_unused_3e.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: arm64/op_unused_3f.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: arm64/op_unused_40.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: arm64/op_unused_41.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: arm64/op_unused_42.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: arm64/op_unused_43.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: arm64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #2 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ ldr w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
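+
+    /*
+     * Illustration of the address arithmetic above (values hypothetical):
+     * for an int[] whose object base is in x0 with length w3=10 and index
+     * w1=4, "add x0, x0, w1, uxtw #2" speculatively forms base + 4*4, and
+     * the unsigned "cmp w1, w3; bcs" rejects both index >= 10 and any
+     * negative index, which compares as a huge unsigned value.  The final
+     * ldr then reads element 4 at x0 + MIRROR_INT_ARRAY_DATA_OFFSET.
+     */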
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: arm64/op_aget_wide.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+    cbz     w0, common_errNullObject    // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x2, w4
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: arm64/op_aget_object.S */
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ FETCH_B w3, 1, 1 // w3<- CC
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ bl artAGetObjectFromMterp // (array, index)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    lsr     w2, wINST, #8               // w2<- AA
+ PREFETCH_INST 2
+ cbnz w1, MterpException
+ SET_VREG_OBJECT w0, w2
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip // jump to next instruction
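+
+    /*
+     * This is the standard call-out pattern for handlers that can throw
+     * from native code: EXPORT_PC publishes the dex pc before the call so
+     * a stack walk sees the right location, PREFETCH_INST loads the next
+     * instruction without committing rPC, and only once the
+     * THREAD_EXCEPTION_OFFSET slot checks clean does ADVANCE move rPC
+     * past the potential exception point.
+     */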
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: arm64/op_aget_boolean.S */
+/* File: arm64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ ldrb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: arm64/op_aget_byte.S */
+/* File: arm64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ ldrsb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: arm64/op_aget_char.S */
+/* File: arm64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ ldrh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: arm64/op_aget_short.S */
+/* File: arm64/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ ldrsh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: arm64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #2 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ str w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: arm64/op_aput_wide.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ GET_VREG_WIDE x1, w4
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: arm64/op_aput_object.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpAputObject
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
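+
+    /*
+     * aput-object is the one array store that leaves assembly entirely:
+     * MterpAputObject must type-check the stored reference against the
+     * array's component type and apply the GC write barrier, so the
+     * handler just forwards the shadow frame, dex pc and instruction
+     * word and tests the success flag in w0.
+     */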
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: arm64/op_aput_boolean.S */
+/* File: arm64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ strb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: arm64/op_aput_byte.S */
+/* File: arm64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ strb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: arm64/op_aput_char.S */
+/* File: arm64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ strh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: arm64/op_aput_short.S */
+/* File: arm64/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ strh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGet32InstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 0
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
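+
+    /*
+     * The ".if 0 / .else / .endif" above is residue of the template
+     * expansion: the iget template takes an is_object flag, so the
+     * assembler statically selects SET_VREG here and SET_VREG_OBJECT in
+     * the iget-object expansion further down.
+     */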
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: arm64/op_iget_wide.S */
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGet64InstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+    cbnz    x3, MterpException          // bail out
+ SET_VREG_WIDE x0, w2
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: arm64/op_iget_object.S */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGetObjInstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 1
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: arm64/op_iget_boolean.S */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGetBooleanInstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 0
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: arm64/op_iget_byte.S */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGetByteInstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 0
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: arm64/op_iget_char.S */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGetCharInstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 0
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: arm64/op_iget_short.S */
+/* File: arm64/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
+ mov x3, xSELF // w3<- self
+ bl artGetShortInstanceFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x3, MterpPossibleException // bail out
+ .if 0
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ .else
+ SET_VREG w0, w2 // fp[A]<- w0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: arm64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet32InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: arm64/op_iput_wide.S */
+ /* iput-wide vA, vB, field//CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ add x2, xFP, x2, lsl #2 // w2<- &fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
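+
+    /*
+     * Note the asymmetry with op_iput above: the 32-bit put loads the
+     * value itself into w2, while the wide variant passes a pointer
+     * ("add x2, xFP, x2, lsl #2" computes &fp[A]) so that
+     * artSet64InstanceFromMterp can read the full 64-bit value from the
+     * shadow-frame slot pair.
+     */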
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: arm64/op_iput_object.S */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ mov x3, xSELF
+ bl MterpIputObject
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: arm64/op_iput_boolean.S */
+/* File: arm64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: arm64/op_iput_byte.S */
+/* File: arm64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: arm64/op_iput_char.S */
+/* File: arm64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: arm64/op_iput_short.S */
+/* File: arm64/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field//CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref CCCC
+ lsr w1, wINST, #12 // w1<- B
+ GET_VREG w1, w1 // w1<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w2, w2 // w2<- fp[A]
+ ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cbnz w0, MterpPossibleException
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGet32StaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGet32StaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 0
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: arm64/op_sget_wide.S */
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field//BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGet64StaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w4, wINST, #8 // w4<- AA
+ cbnz x3, MterpException // bail out
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG_WIDE x0, w4
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: arm64/op_sget_object.S */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGetObjStaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGetObjStaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 1
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: arm64/op_sget_boolean.S */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGetBooleanStaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ uxtb w0, w0
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 0
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: arm64/op_sget_byte.S */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGetByteStaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGetByteStaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ sxtb w0, w0
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 0
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: arm64/op_sget_char.S */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGetCharStaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGetCharStaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ uxth w0, w0
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 0
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: arm64/op_sget_short.S */
+/* File: arm64/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field//BBBB */
+
+ .extern artGetShortStaticFromCode
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl artGetShortStaticFromCode
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ sxth w0, w0
+ PREFETCH_INST 2
+ cbnz x3, MterpException // bail out
+.if 0
+ SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
+.else
+ SET_VREG w0, w2 // fp[AA]<- w0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip
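+
+    /*
+     * The only difference among the narrow sget expansions above is the
+     * extension instruction slotted in after the call: uxtb (boolean)
+     * and uxth (char) zero-extend the unsigned types, while sxtb (byte)
+     * and sxth (short) sign-extend, matching Java's widening rules.
+     */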
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: arm64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+    FETCH   w0, 1                       // w0<- field ref BBBB
+    lsr     w3, wINST, #8               // w3<- AA
+    GET_VREG w1, w3                     // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet32StaticFromCode
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: arm64/op_sput_wide.S */
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field//BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH w0, 1 // w0<- field ref BBBB
+ ldr x1, [xFP, #OFF_FP_METHOD]
+    lsr     w2, wINST, #8               // w2<- AA
+ add x2, xFP, w2, lsl #2
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cbnz w0, MterpException // 0 on success, -1 on failure
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: arm64/op_sput_object.S */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xINST
+ mov x3, xSELF
+ bl MterpSputObject
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: arm64/op_sput_boolean.S */
+/* File: arm64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+    FETCH   w0, 1                       // w0<- field ref BBBB
+    lsr     w3, wINST, #8               // w3<- AA
+    GET_VREG w1, w3                     // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: arm64/op_sput_byte.S */
+/* File: arm64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+    FETCH   w0, 1                       // w0<- field ref BBBB
+    lsr     w3, wINST, #8               // w3<- AA
+    GET_VREG w1, w3                     // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: arm64/op_sput_char.S */
+/* File: arm64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+    FETCH   w0, 1                       // w0<- field ref BBBB
+    lsr     w3, wINST, #8               // w3<- AA
+    GET_VREG w1, w3                     // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: arm64/op_sput_short.S */
+/* File: arm64/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field//BBBB */
+ EXPORT_PC
+    FETCH   w0, 1                       // w0<- field ref BBBB
+    lsr     w3, wINST, #8               // w3<- AA
+    GET_VREG w1, w3                     // w1<- fp[AA]
+ ldr x2, [xFP, #OFF_FP_METHOD]
+ mov x3, xSELF
+ PREFETCH_INST 2 // Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cbnz w0, MterpException // 0 on success
+ ADVANCE 2 // Past exception point - now advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: arm64/op_invoke_virtual.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtual
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeVirtual
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
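+
+    /*
+     * Every invoke opcode shares this thin wrapper: the handler hands
+     * self, the shadow frame, xPC and the raw instruction word to the
+     * C++ helper, which does all argument-register decoding, and on
+     * success simply advances rPC by the 3 code units of the invoke
+     * instruction.
+     */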
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: arm64/op_invoke_super.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeSuper
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: arm64/op_invoke_direct.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeDirect
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: arm64/op_invoke_static.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeStatic
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: arm64/op_invoke_interface.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeInterface
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: arm64/op_return_void_no_barrier.S */
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .Lop_return_void_no_barrier_check
+.Lop_return_void_no_barrier_return:
+ mov x0, #0
+ b MterpReturn
+.Lop_return_void_no_barrier_check:
+ bl MterpSuspendCheck // (self)
+ b .Lop_return_void_no_barrier_return
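+
+    /*
+     * Returns, like backward branches, are mandatory suspend points: the
+     * handler tests THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST
+     * and detours through MterpSuspendCheck before handing the zeroed
+     * void result in x0 to MterpReturn.
+     */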
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: arm64/op_invoke_virtual_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeVirtualRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: arm64/op_invoke_super_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeSuperRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: arm64/op_invoke_direct_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeDirectRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: arm64/op_invoke_static_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeStaticRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: arm64/op_invoke_interface_range.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ // and x3, xINST, 0xFFFF
+ mov x3, xINST
+ bl MterpInvokeInterfaceRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: arm64/op_unused_79.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: arm64/op_unused_7a.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: arm64/op_neg_int.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ sub w0, wzr, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
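+
+    /*
+     * The template's "instr" slot is filled here with "sub w0, wzr, w0":
+     * negation on A64 is subtraction from the zero register (this is
+     * exactly what the NEG alias assembles to).
+     */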
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: arm64/op_not_int.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ mvn w0, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: arm64/op_neg_long.S */
+/* File: arm64/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op x0".
+ *
+ * For: neg-long, not-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+
+ sub x0, xzr, x0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: arm64/op_not_long.S */
+/* File: arm64/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op x0".
+ *
+ * For: neg-long, not-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+
+ mvn x0, x0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: arm64/op_neg_float.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ mov w4, #0x80000000 // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ add w0, w0, w4 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
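+/*
+ * neg-float never touches the FP unit: adding 0x80000000 to the raw bits
+ * flips only the sign bit, because the carry out of bit 31 is discarded.
+ * The same bit manipulation in C:
+ *
+ *   uint32_t neg_float_bits(uint32_t bits) {
+ *       return bits + 0x80000000u;  // equivalent to bits ^ 0x80000000u
+ *   }
+ */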
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: arm64/op_neg_double.S */
+/* File: arm64/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op x0".
+ *
+ * For: neg-long, not-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ mov x1, #0x8000000000000000
+ add x0, x0, x1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-11 instructions */
+
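+/*
+ * Same sign-bit trick as neg-float above, widened to 64 bits: adding
+ * 0x8000000000000000 is equivalent to xor-ing it in, since the carry out
+ * of bit 63 is discarded.
+ */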
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: arm64/op_int_to_long.S */
+/* File: arm64/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "x0 = op w0".
+ *
+ * For: int-to-long (and, with FP "instr" lines, int-to-double,
+ *      float-to-double, float-to-long)
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG w0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ sbfm x0, x0, 0, 31 // x0<- sign-extended w0 (sxtw)
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4 // vA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+
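+/*
+ * The sbfm above is sxtw spelled out: it copies bits 0..31 of w0 into x0
+ * and replicates bit 31 upward, i.e. (int64_t)(int32_t)v in C.
+ */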
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: arm64/op_int_to_float.S */
+/* File: arm64/funopNarrow.S */
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op w0".
+ *
+ * For: int-to-float, float-to-int
+ * TODO: refactor all of the conversions - parameterize width and use same template.
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG w0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ scvtf s0, w0 // s0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG s0, w4 // vA<- s0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: arm64/op_int_to_double.S */
+/* File: arm64/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op w0".
+ *
+ * For: int-to-double, float-to-double, float-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG w0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ scvtf d0, w0 // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE d0, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: arm64/op_long_to_int.S */
+/* File: arm64/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "w0 = op x0".
+ *
+ * For: long-to-int, long-to-float, double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ // no instr needed: w0 already holds the low 32 bits of x0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w4 // vA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: arm64/op_long_to_float.S */
+/* File: arm64/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op x0".
+ *
+ * For: long-to-int, long-to-float, double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ scvtf s0, x0 // s0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG s0, w4 // vA<- s0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: arm64/op_long_to_double.S */
+/* File: arm64/funopWide.S */
+ /*
+ * Generic 64bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op x0".
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ scvtf d0, x0 // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE d0, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: arm64/op_float_to_int.S */
+/* File: arm64/funopNarrow.S */
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "w0 = op s0".
+ *
+ * For: int-to-float, float-to-int
+ * TODO: refactor all of the conversions - parameterize width and use same template.
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG s0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvtzs w0, s0 // w0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w4 // vA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
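+/*
+ * fcvtzs rounds toward zero, saturates on overflow, and converts NaN to
+ * 0, which is exactly the Java float-to-int contract, so one instruction
+ * suffices. The same behavior sketched in C (checks first, since a bare
+ * out-of-range cast is undefined in C):
+ *
+ *   int32_t f2i(float f) {
+ *       if (f != f) return 0;                       // NaN
+ *       if (f >= 2147483648.0f) return INT32_MAX;   // saturate high
+ *       if (f < -2147483648.0f) return INT32_MIN;   // saturate low
+ *       return (int32_t)f;                          // truncate
+ *   }
+ */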
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: arm64/op_float_to_long.S */
+/* File: arm64/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "x0 = op s0".
+ *
+ * For: int-to-double, float-to-double, float-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG s0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvtzs x0, s0 // x0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4 // vA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: arm64/op_float_to_double.S */
+/* File: arm64/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double, float-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG s0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvt d0, s0 // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE d0, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: arm64/op_double_to_int.S */
+/* File: arm64/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "w0 = op d0".
+ *
+ * For: long-to-int, long-to-float, double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE d0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvtzs w0, d0 // w0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w4 // vA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: arm64/op_double_to_long.S */
+/* File: arm64/funopWide.S */
+ /*
+ * Generic 64bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "x0 = op d0".
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE d0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvtzs x0, d0 // x0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4 // vA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: arm64/op_double_to_float.S */
+/* File: arm64/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: long-to-int, long-to-float, double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w4, wINST, #8 // w4<- A+
+ GET_VREG_WIDE d0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ and w4, w4, #15 // w4<- A
+ fcvt s0, d0 // s0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG s0, w4 // vA<- s0
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: arm64/op_int_to_byte.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ sxtb w0, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: arm64/op_int_to_char.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ uxth w0, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: arm64/op_int_to_short.S */
+/* File: arm64/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ // optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ sxth w0, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
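+/*
+ * The three narrowing conversions above map one-for-one onto A64 extend
+ * instructions -- in C terms:
+ *
+ *   int32_t to_byte(int32_t v)  { return (int8_t)v;   }  // sxtb
+ *   int32_t to_char(int32_t v)  { return (uint16_t)v; }  // uxth
+ *   int32_t to_short(int32_t v) { return (int16_t)v;  }  // sxth
+ */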
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: arm64/op_add_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ add w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
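+/*
+ * Decode pattern shared by op_add_int above and the rest of the 23x-format
+ * binops that follow: AA comes from the high byte of the current code
+ * unit, and the second code unit packs CC|BB. A sketch in C, with vreg
+ * access simplified to a plain array:
+ *
+ *   uint16_t inst = insns[pc];       // wINST
+ *   uint16_t ccbb = insns[pc + 1];   // FETCH w0, 1
+ *   uint32_t aa = inst >> 8;         // lsr w9, wINST, #8
+ *   uint32_t cc = ccbb >> 8;         // lsr w3, w0, #8
+ *   uint32_t bb = ccbb & 0xff;       // and w2, w0, #255
+ *   vreg[aa] = vreg[bb] + vreg[cc];  // add w0, w0, w1
+ */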
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: arm64/op_sub_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sub w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: arm64/op_mul_int.S */
+/* operand order kept from the arm32 template, where "mul rd, rd, rm" was illegal; A64 allows either order */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ mul w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: arm64/op_div_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 1
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sdiv w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
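+/*
+ * Here chkzero=1, so the .if above assembles the explicit cbz: A64 sdiv
+ * by zero quietly produces 0 instead of trapping, so the interpreter must
+ * branch to common_errDivideByZero itself. The other special case,
+ * INT32_MIN / -1, needs no check because sdiv wraps it to INT32_MIN,
+ * which is what the Java semantics require.
+ */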
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: arm64/op_rem_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 1
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ sdiv w2, w0, w1 // optional op; may set condition codes
+ msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
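+/*
+ * A64 has no integer remainder instruction, so rem-int is composed from
+ * sdiv plus msub (multiply-subtract). The same arithmetic in C:
+ *
+ *   int32_t rem(int32_t a, int32_t b) {
+ *       int32_t q = a / b;   // sdiv w2, w0, w1
+ *       return a - q * b;    // msub w0, w2, w1, w0
+ *   }
+ */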
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: arm64/op_and_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ and w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: arm64/op_or_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ orr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: arm64/op_xor_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ eor w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: arm64/op_shl_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsl w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: arm64/op_shr_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ asr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: arm64/op_ushr_int.S */
+/* File: arm64/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if 0
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: arm64/op_add_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ add x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
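+/*
+ * The wide binops mirror the 32-bit template; GET_VREG_WIDE and
+ * SET_VREG_WIDE move a full 64-bit value that occupies a pair of the
+ * shadow frame's 32-bit vreg slots (vN and vN+1), accessed here as a
+ * single 64-bit load or store.
+ */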
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: arm64/op_sub_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ sub x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: arm64/op_mul_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ mul x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: arm64/op_div_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 1
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ sdiv x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: arm64/op_rem_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 1
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ sdiv x3, x1, x2
+ msub x0, x3, x2, x1 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
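+/*
+ * rem-long is the 64-bit analogue of the sdiv/msub pairing used by
+ * rem-int above: x0 <- x1 - (x1 / x2) * x2, with the same explicit
+ * zero-divisor check since sdiv by zero yields 0 rather than trapping.
+ */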
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: arm64/op_and_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ and x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: arm64/op_or_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ orr x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: arm64/op_xor_long.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (x2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x2, w2 // x2<- vCC
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ .if 0
+ cbz x2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ eor x0, x1, x2 // x0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w4 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: arm64/op_shl_long.S */
+/* File: arm64/shiftWide.S */
+ /*
+ * 64-bit shift operation.
+ *
+ * For: shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w3, wINST, #8 // w3<- AA
+ lsr w2, w0, #8 // w2<- CC
+ GET_VREG w2, w2 // w2<- vCC (shift count)
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and x2, x2, #63 // Mask low 6 bits
+ lsl x0, x1, x2 // Do the shift.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
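+/*
+ * Dalvik defines the long shifts (shl/shr/ushr above and below) to use
+ * only the low six bits of the count. A64 variable shifts already take
+ * the count modulo the register width, so the explicit mask is defensive
+ * rather than required. In C terms:
+ *
+ *   int64_t shl_long(int64_t v, int32_t count) {
+ *       return v << (count & 63);
+ *   }
+ */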
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: arm64/op_shr_long.S */
+/* File: arm64/shiftWide.S */
+ /*
+ * 64-bit shift operation.
+ *
+ * For: shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w3, wINST, #8 // w3<- AA
+ lsr w2, w0, #8 // w2<- CC
+ GET_VREG w2, w2 // w2<- vCC (shift count)
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and x2, x2, #63 // Mask low 6 bits
+ asr x0, x1, x2 // Do the shift.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: arm64/op_ushr_long.S */
+/* File: arm64/shiftWide.S */
+ /*
+ * 64-bit shift operation.
+ *
+ * For: shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w3, wINST, #8 // w3<- AA
+ lsr w2, w0, #8 // w2<- CC
+ GET_VREG w2, w2 // w2<- vCC (shift count)
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and x2, x2, #63 // Mask low 6 bits
+ lsr x0, x1, x2 // Do the shift.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: arm64/op_add_float.S */
+/* File: arm64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ fadd s0, s0, s1 // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: arm64/op_sub_float.S */
+/* File: arm64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ fsub s0, s0, s1 // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: arm64/op_mul_float.S */
+/* File: arm64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ fmul s0, s0, s1 // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: arm64/op_div_float.S */
+/* File: arm64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ fdiv s0, s0, s1 // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: arm64/op_rem_float.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: arm64/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ bl fmodf // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
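+/*
+ * rem-float defers to libm fmodf: the operands are already sitting in
+ * s0/s1, which is where AAPCS64 passes the first two float arguments, and
+ * the result comes back in s0. AA is extracted from wINST only after the
+ * call, since the low w registers are caller-saved while wINST lives in a
+ * callee-saved register, so the template works unchanged when "instr" is
+ * a call.
+ */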
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: arm64/op_add_double.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (d2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d2, w2 // d2<- vCC
+ GET_VREG_WIDE d1, w1 // d1<- vBB
+ .if 0
+ cbz d2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ fadd d0, d1, d2 // d0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: arm64/op_sub_double.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (d2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d2, w2 // d2<- vCC
+ GET_VREG_WIDE d1, w1 // d1<- vBB
+ .if 0
+ cbz d2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ fsub d0, d1, d2 // d0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: arm64/op_mul_double.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (d2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d2, w2 // d2<- vCC
+ GET_VREG_WIDE d1, w1 // d1<- vBB
+ .if 0
+ cbz d2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ fmul d0, d1, d2 // d0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: arm64/op_div_double.S */
+/* File: arm64/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (d2). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d2, w2 // d2<- vCC
+ GET_VREG_WIDE d1, w1 // d1<- vBB
+ .if 0
+ cbz d2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ fdiv d0, d1, d2 // d0<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: arm64/op_rem_double.S */
+ /* rem vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d1, w2 // d1<- vCC
+ GET_VREG_WIDE d0, w1 // d0<- vBB
+ bl fmod
+ lsr w4, wINST, #8 // w4<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
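+/*
+ * rem-double is open-coded rather than expanded from binopWide.S so that
+ * the operands land directly in d0/d1 for the AAPCS64 call to libm fmod;
+ * the result returns in d0, ready for SET_VREG_WIDE.
+ */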
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: arm64/op_add_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ add w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: arm64/op_sub_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sub w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: arm64/op_mul_int_2addr.S */
+/* operand order kept from the arm32 template, where "mul rd, rd, rm" was illegal; A64 allows either order */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ mul w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: arm64/op_div_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sdiv w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: arm64/op_rem_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ sdiv w2, w0, w1 // optional op; may set condition codes
+ msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
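+ /*
+  * (Editor's note.) A64 has no integer remainder instruction, so rem is
+  * composed as w0 - (w0 sdiv w1) * w1 via the sdiv+msub pair above;
+  * e.g. 7 rem 3 = 7 - (7/3)*3 = 7 - 6 = 1, with the sign following the
+  * dividend as Java requires.
+  */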
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: arm64/op_and_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ and w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: arm64/op_or_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ orr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: arm64/op_xor_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ // optional op; may set condition codes
+ eor w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: arm64/op_shl_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsl w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
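+ /*
+  * (Editor's note.) "and w1, w1, #31" encodes the Dex/Java rule that 32-bit
+  * shifts use only the low five bits of the count (x << 33 == x << 1). The
+  * A64 register-shift instructions already take the count modulo 32, so the
+  * mask is belt-and-braces rather than strictly required.
+  */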
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: arm64/op_shr_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ asr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: arm64/op_ushr_int_2addr.S */
+/* File: arm64/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: arm64/op_add_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ add x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
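+ /*
+  * (Editor's note.) The "must not be a function call" rule above is an
+  * AAPCS64 liveness constraint: w2 still holds the vA index for
+  * SET_VREG_WIDE and would be clobbered across a bl. Handlers that do call
+  * out (op_rem_double_2addr below) re-extract A from wINST after the call.
+  */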
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: arm64/op_sub_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ sub x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: arm64/op_mul_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ mul x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: arm64/op_div_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 1
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ sdiv x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: arm64/op_rem_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 1
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ sdiv x3, x0, x1
+ msub x0, x3, x1, x0 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
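+ /*
+  * (Editor's note.) Same sdiv+msub remainder idiom as the 32-bit case,
+  * widened to x registers: x0 <- x0 - (x0 sdiv x1) * x1.
+  */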
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: arm64/op_and_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ and x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: arm64/op_or_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ orr x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: arm64/op_xor_long_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ .if 0
+ cbz x1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ eor x0, x0, x1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: arm64/op_shl_long_2addr.S */
+/* File: arm64/shiftWide2addr.S */
+ /*
+ * Generic 64-bit shift operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w1, w1 // w1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and x1, x1, #63 // Mask low 6 bits.
+ lsl x0, x0, x1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
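+ /*
+  * (Editor's note.) Wide shifts mask the count to six bits (#63) instead of
+  * five, per Java long-shift semantics; only the shift instruction
+  * (lsl/asr/lsr) differs among this handler and the two that follow.
+  */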
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: arm64/op_shr_long_2addr.S */
+/* File: arm64/shiftWide2addr.S */
+ /*
+ * Generic 64-bit shift operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w1, w1 // w1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and x1, x1, #63 // Mask low 6 bits.
+ asr x0, x0, x1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm64/op_ushr_long_2addr.S */
+/* File: arm64/shiftWide2addr.S */
+ /*
+ * Generic 64-bit shift operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w1, w1 // w1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ and x1, x1, #63 // Mask low 6 bits.
+ lsr x0, x0, x1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: arm64/op_add_float_2addr.S */
+/* File: arm64/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ fadd s2, s0, s1 // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: arm64/op_sub_float_2addr.S */
+/* File: arm64/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ fsub s2, s0, s1 // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: arm64/op_mul_float_2addr.S */
+/* File: arm64/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ fmul s2, s0, s1 // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: arm64/op_div_float_2addr.S */
+/* File: arm64/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ fdiv s2, s0, s1 // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: arm64/op_rem_float_2addr.S */
+ /* rem vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ lsr w9, wINST, #8 // w9<- A+
+ and w9, w9, #15 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ bl fmodf
+ lsr w9, wINST, #8 // w9<- A+ (need to reload - killed across call)
+ and w9, w9, #15 // w9<- A
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w9
+ GOTO_OPCODE ip // jump to next instruction
+
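+ /*
+  * (Editor's note.) There is no A64 float-remainder instruction, so this
+  * handler calls libm's fmodf with vA in s0 and vB in s1 (the first two FP
+  * argument registers under AAPCS64) and takes the result from s0; w9 is
+  * re-derived from wINST because the call clobbers caller-saved registers.
+  */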
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: arm64/op_add_double_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ .if 0
+ cbz d1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ fadd d0, d0, d1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: arm64/op_sub_double_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ .if 0
+ cbz d1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ fsub d0, d0, d1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: arm64/op_mul_double_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ .if 0
+ cbz d1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ fmul d0, d0, d1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: arm64/op_div_double_2addr.S */
+/* File: arm64/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (x1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ .if 0
+ cbz d1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+
+ fdiv d0, d0, d1 // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: arm64/op_rem_double_2addr.S */
+ /* rem vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ bl fmod
+ ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: arm64/op_add_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ add w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
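+ /*
+  * (Editor's note.) In the lit16 encoding the second code unit is the whole
+  * literal, so FETCH_S already delivers the sign-extended operand ssssCCCC
+  * in w1 with no unpacking -- contrast the lit8 forms further down, which
+  * split a single unit into CC and BB.
+  */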
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: arm64/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ sub w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
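+ /*
+  * (Editor's note.) rsub reverses the operands -- vA <- CCCC - vB -- hence
+  * "sub w0, w1, w0" (literal minus vB) where the other lit16 ops compute
+  * w0 op w1.
+  */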
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: arm64/op_mul_int_lit16.S */
+/* operand order kept as "mul w0, w1, w0"; the "w0, w0, w1 is illegal" note is inherited from the 32-bit ARM port -- A64 accepts either order */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ mul w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: arm64/op_div_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ sdiv w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: arm64/op_rem_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ sdiv w3, w0, w1
+ msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: arm64/op_and_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ and w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: arm64/op_or_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ orr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: arm64/op_xor_int_lit16.S */
+/* File: arm64/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CCCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ eor w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: arm64/op_add_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ add w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
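+ /*
+  * (Editor's note.) lit8 packs both operands into one code unit as CC|BB:
+  * "and w2, w3, #255" extracts the vreg index BB and "asr w1, w3, #8"
+  * recovers the signed literal CC. E.g. a unit of 0xff02 gives BB = v2 and
+  * CC = -1.
+  */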
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm64/op_rsub_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sub w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: arm64/op_mul_int_lit8.S */
+/* operand order kept as "mul w0, w1, w0"; the "w0, w0, w1 is illegal" note is inherited from the 32-bit ARM port -- A64 accepts either order */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ mul w0, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: arm64/op_div_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ sdiv w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: arm64/op_rem_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 1
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ sdiv w3, w0, w1 // optional op; may set condition codes
+ msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: arm64/op_and_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ and w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: arm64/op_or_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ orr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: arm64/op_xor_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ // optional op; may set condition codes
+ eor w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: arm64/op_shl_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsl w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: arm64/op_shr_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ asr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm64/op_ushr_int_lit8.S */
+/* File: arm64/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the sign-extended literal CC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ asr w1, w3, #8 // w1<- ssssssCC (sign extended)
+ .if 0
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ and w1, w1, #31 // optional op; may set condition codes
+ lsr w0, w0, w1 // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: arm64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ ldr w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
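+ /*
+  * (Editor's note.) The -quick opcodes are rewritten forms in which CCCC is
+  * a pre-resolved field byte offset, so the handler can issue a bare
+  * [object + offset] load; the only remaining check is the explicit null
+  * test on the object register.
+  */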
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: arm64/op_iget_wide_quick.S */
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w4, 1 // w4<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ add x4, x3, x4 // create direct pointer
+ ldr x0, [x4]
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG_WIDE x0, w2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: arm64/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- object we're operating on
+ bl artIGetObjectFromMterp // (obj, offset)
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz w3, MterpPossibleException // bail out
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
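+ /*
+  * (Editor's note.) Unlike the primitive loads above, object fields are
+  * fetched through the artIGetObjectFromMterp helper so the runtime can
+  * apply whatever read barrier the garbage collector needs; the handler
+  * then inspects THREAD_EXCEPTION_OFFSET and bails to MterpPossibleException
+  * on failure.
+  */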
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: arm64/op_iput_quick.S */
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ str w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: arm64/op_iput_wide_quick.S */
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w3, 1 // w3<- field byte offset
+ GET_VREG w2, w2 // w2<- fp[B], the object pointer
+ ubfx w0, wINST, #8, #4 // w0<- A
+ cbz w2, common_errNullObject // object was null
+ GET_VREG_WIDE x0, w0 // x0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ add x1, x2, x3 // create a direct pointer
+ str x0, [x1]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: arm64/op_iput_object_quick.S */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpIputObjectQuick
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
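+/*
+ * Reference stores also stay out of line: the runtime helper performs the
+ * GC write barrier (card mark) that a raw str would miss.  Assumed C++
+ * signature (a sketch); a false return means a pending exception:
+ *
+ *   extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame,
+ *                                        uint16_t* dex_pc_ptr,
+ *                                        uint32_t inst_data);
+ */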
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm64/op_invoke_virtual_quick.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+    mov     x3, xINST                   // inst_data; callee reads only the low 16 bits
+ bl MterpInvokeVirtualQuick
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
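+/*
+ * All invoke handlers share this shape: x0<- self, x1<- &shadow_frame,
+ * x2<- dex PC, x3<- inst_data, then a call whose boolean result selects
+ * between MterpException and falling through; FETCH_ADVANCE_INST 3 skips
+ * the three code units of the invoke.  Assumed C++ signature (a sketch):
+ *
+ *   extern "C" bool MterpInvokeVirtualQuick(Thread* self,
+ *                                           ShadowFrame* shadow_frame,
+ *                                           uint16_t* dex_pc_ptr,
+ *                                           uint16_t inst_data);
+ */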
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm64/op_invoke_virtual_range_quick.S */
+/* File: arm64/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+    mov     x3, xINST                   // inst_data; callee reads only the low 16 bits
+ bl MterpInvokeVirtualQuickRange
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: arm64/op_iput_boolean_quick.S */
+/* File: arm64/op_iput_quick.S */
+    /* For: iput-boolean-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ strb w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: arm64/op_iput_byte_quick.S */
+/* File: arm64/op_iput_quick.S */
+    /* For: iput-byte-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ strb w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: arm64/op_iput_char_quick.S */
+/* File: arm64/op_iput_quick.S */
+    /* For: iput-char-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ strh w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: arm64/op_iput_short_quick.S */
+/* File: arm64/op_iput_quick.S */
+    /* For: iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ strh w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: arm64/op_iget_boolean_quick.S */
+/* File: arm64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ ldrb w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: arm64/op_iget_byte_quick.S */
+/* File: arm64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ ldrsb w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: arm64/op_iget_char_quick.S */
+/* File: arm64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ ldrh w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: arm64/op_iget_short_quick.S */
+/* File: arm64/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+    cbz     w3, common_errNullObject    // object was null
+ ldrsh w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
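+/*
+ * The five iget-*-quick variants above differ only in the load used for
+ * the field: ldr (word), ldrb (boolean), ldrsb (byte), ldrh (char),
+ * ldrsh (short).  The iput-*-quick variants mirror this with str/strb/strh.
+ */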
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
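+/*
+ * The experimental lambda opcodes (0xf3, 0xf5..0xf9) have no mterp
+ * implementation yet; each stub punts via MterpFallback.  Roughly (an
+ * assumption based on the glue code, not a quote): MterpFallback makes
+ * ExecuteMterpImpl return false, and the caller then re-executes the same
+ * dex instruction under the reference interpreter.
+ */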
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: arm64/op_unused_f4.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: arm64/op_unused_fa.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: arm64/op_unused_fb.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: arm64/op_unused_fc.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: arm64/op_unused_fd.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: arm64/op_unused_fe.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: arm64/op_unused_ff.S */
+/* File: arm64/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+ .balign 128
+ .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
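+
+/*
+ * Every handler above is padded to a 128-byte slot (.balign 128), so
+ * dispatch is a shift-and-add off the table base.  A sketch of what
+ * GOTO_OPCODE expands to (see arm64/header.S for the real macro):
+ *
+ *   add   xReg, xIBASE, xReg, lsl #7   // reg holds the opcode number
+ *   br    xReg
+ */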
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global artMterpAsmSisterStart
+ .type artMterpAsmSisterStart, %function
+ .text
+ .balign 4
+artMterpAsmSisterStart:
+
+ .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
+ .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
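+
+/*
+ * The sister section is overflow space for handler code that does not fit
+ * in its 128-byte slot; this arm64 build currently needs none, so the
+ * section is empty.
+ */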
+
+
+ .global artMterpAsmAltInstructionStart
+ .type artMterpAsmAltInstructionStart, %function
+ .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (0 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
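+/*
+ * The remaining 255 ALT stubs below are this same template, differing only
+ * in the (opcode * 128) offset loaded into lr.  Because lr already points
+ * at the primary handler, the C++ check simply returns into it.  Assumed
+ * signature (a sketch):
+ *
+ *   extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame);
+ */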
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (1 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (2 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (3 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (4 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (5 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (6 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (7 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (8 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (9 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (10 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (11 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (12 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (13 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (14 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (15 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (16 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (17 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (18 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (19 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (20 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (21 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (22 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (23 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (24 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (25 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (26 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (27 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (28 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (29 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (30 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (31 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (32 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (33 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (34 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (35 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (36 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (37 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (38 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (39 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (40 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (41 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (42 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (43 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (44 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (45 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (46 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (47 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (48 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (49 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (50 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (51 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (52 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (53 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (54 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (55 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (56 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (57 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (58 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (59 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (60 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (61 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (62 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (63 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (64 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (65 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (66 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (67 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (68 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (69 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (70 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (71 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (72 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (73 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (74 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (75 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (76 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (77 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (78 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (79 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (80 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (81 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (82 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (83 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (84 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (85 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (86 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (87 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (88 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (89 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (90 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (91 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (92 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (93 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (94 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (95 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (96 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (97 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (98 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (99 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (100 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (101 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (102 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (103 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (104 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (105 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (106 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (107 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (108 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (109 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (110 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (111 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (112 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (113 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (114 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (115 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (116 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (117 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (118 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (119 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (120 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (121 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (122 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (123 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (124 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (125 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (126 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (127 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (128 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (129 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (130 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (131 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (132 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (133 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (134 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (135 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (136 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (137 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (138 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (139 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (140 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (141 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (142 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (143 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (144 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (145 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (146 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (147 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (148 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (149 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (150 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (151 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (152 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (153 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (154 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (155 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (156 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (157 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (158 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (159 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (160 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (161 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (162 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (163 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (164 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (165 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (166 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (167 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (168 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (169 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (170 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (171 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (172 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (173 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (174 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (175 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (176 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (177 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (178 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (179 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (180 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (181 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (182 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (183 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (184 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (185 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (186 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (187 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (188 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (189 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (190 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (191 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (192 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (193 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (194 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (195 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (196 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (197 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (198 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (199 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (200 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (201 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (202 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (203 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (204 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (205 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (206 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (207 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (208 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (209 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (210 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (211 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (212 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (213 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (214 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (215 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (216 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (217 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (218 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (219 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (220 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (221 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (222 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (223 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (224 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (225 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (226 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (227 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (228 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (229 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (230 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (231 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (232 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (233 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (234 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (235 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (236 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (237 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (238 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (239 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (240 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (241 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (242 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (243 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (244 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
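+/*
+ * Editor's note: unused opcodes such as 0xf4 above still receive full
+ * 128-byte alt stubs.  The interpreter computes handler addresses as
+ * base + opcode * 128 rather than consulting a sparse table, so all 256
+ * slots must be populated even for opcodes the dex format never emits.
+ */
+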
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (245 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (246 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (247 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (248 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (249 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (250 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (251 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (252 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (253 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (254 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: arm64/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
+ adr lr, artMterpAsmInstructionStart + (255 * 128) // Addr of primary handler.
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore // (self, shadow_frame) Note: tail call.
+
+ .balign 128
+ .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
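The alt stubs above only fire when a thread's instruction base is switched from the primary handler table to artMterpAsmAltInstructionStart; each stub then reconstructs the primary handler's address as table base plus opcode times 128 (the .balign spacing) and hands it to MterpCheckBefore in lr. A minimal C++ sketch of that arithmetic, with illustrative stand-ins for the table symbols and handler size:

    // Hedged sketch of the alt-table dispatch arithmetic; the arrays below are
    // stand-ins for artMterpAsmInstructionStart / artMterpAsmAltInstructionStart
    // and the 128-byte .balign spacing used by the stubs above.
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kNumPackedOpcodes = 256;
    constexpr size_t kMterpHandlerSize = 128;   // each handler is .balign 128

    alignas(128) char primary_handlers[kNumPackedOpcodes * kMterpHandlerSize];
    alignas(128) char alt_handlers[kNumPackedOpcodes * kMterpHandlerSize];

    // An alt stub for opcode N computes primary base + N * 128 into lr, then
    // tail-calls MterpCheckBefore, which eventually jumps there.
    inline void* PrimaryHandlerFor(uint8_t opcode) {
      return primary_handlers + opcode * kMterpHandlerSize;
    }

    // Pointing a thread's ibase at the alt table routes every instruction
    // through MterpCheckBefore first (e.g. while instrumentation is active).
    inline void* AltHandlerFor(uint8_t opcode) {
      return alt_handlers + opcode * kMterpHandlerSize;
    }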
+/* File: arm64/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
+    cbz x0, MterpFallback                       // If no pending exception, fall back to the reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ */
+MterpException:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException // (self, shadow_frame)
+ cbz w0, MterpExceptionReturn // no local catch, back to caller.
+ ldr x0, [xFP, #OFF_FP_CODE_ITEM]
+ ldr w1, [xFP, #OFF_FP_DEX_PC]
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add xPC, x0, #CODEITEM_INSNS_OFFSET
+ add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
+ str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
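On the catch path above, MterpException rebuilds the interpreter PC from the code item and the shadow frame's dex pc; dex instructions are 16-bit code units, hence the `lsl #1`. A hedged C++ rendering of that computation (the struct is an illustrative stand-in for the fields named by the OFF_FP_* offsets, not ART's ShadowFrame):

    // Sketch of the catch-block PC recomputation done in MterpException above.
    #include <cstdint>

    struct ShadowFrameSketch {
      const uint16_t* insns;  // code item's 16-bit code units (CODEITEM_INSNS_OFFSET)
      uint32_t dex_pc;        // catch handler's dex pc, in code units (OFF_FP_DEX_PC)
    };

    inline const uint16_t* CatchBlockPcPtr(const ShadowFrameSketch& sf) {
      // add xPC, x0, #CODEITEM_INSNS_OFFSET ; add xPC, xPC, x1, lsl #1
      return sf.insns + sf.dex_pc;  // uint16_t arithmetic == byte offset dex_pc * 2
    }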
+/*
+ * Check for a pending suspend request. Assumes wINST is already loaded and xPC has
+ * been advanced; the opcode still needs to be fetched and branched to, and the
+ * thread flags are in w7.
+ */
+MterpCheckSuspendAndContinue:
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne check1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+check1:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
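In C terms, the routine above is a single flag test: only when a suspend or checkpoint bit is set does control divert to MterpSuspendCheck before dispatching the next opcode. A hedged sketch, with illustrative constants mirroring the THREAD_* masks:

    // Sketch of MterpCheckSuspendAndContinue's control flow; the constants and
    // the stub below are illustrative assumptions, not the runtime's symbols.
    #include <cstdint>

    constexpr uint32_t kSuspendRequest    = 1u << 0;  // mirrors THREAD_SUSPEND_REQUEST
    constexpr uint32_t kCheckpointRequest = 1u << 1;  // mirrors THREAD_CHECKPOINT_REQUEST

    void MterpSuspendCheckStub() {}  // stand-in for MterpSuspendCheck(self)

    inline void CheckSuspendAndContinue(uint32_t flags) {
      if ((flags & (kSuspendRequest | kCheckpointRequest)) != 0) {
        MterpSuspendCheckStub();  // slow path: service the request
      }
      // Either way, fall through to fetch and dispatch the next opcode
      // (GET_INST_OPCODE / GOTO_OPCODE in the assembly above).
    }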
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov x0, #0 // signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* xFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov x0, #1 // signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
+ str x0, [x2]
+ mov x0, xSELF
+ ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.eq check2
+ bl MterpSuspendCheck // (self)
+check2:
+ mov x0, #1 // signal return to caller.
+MterpDone:
+ ldp fp, lr, [sp, #48]
+ ldp xPC, xFP, [sp, #32]
+ ldp xSELF, xINST, [sp, #16]
+ ldp xIBASE, xREFS, [sp], #64
+ ret
+
+ .cfi_endproc
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
index 8b26976328..ac8794581c 100755
--- a/runtime/interpreter/mterp/rebuild.sh
+++ b/runtime/interpreter/mterp/rebuild.sh
@@ -21,4 +21,4 @@
set -e
# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
-for arch in arm x86; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
+for arch in arm x86 arm64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4e0146c603..fa5c41d7ae 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -239,7 +239,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
struct CollectClasses : public ClassVisitor {
- bool Visit(mirror::Class* klass) override {
+ bool operator()(mirror::Class* klass) override {
classes_.push_back(klass);
return true;
}
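This hunk tracks a wider refactor of ClassVisitor from a virtual Visit method to operator(), letting visitors be invoked like function objects. A hedged sketch of the resulting shape (the base class here is inferred from the hunk, not quoted from the runtime):

    // Sketch of a functor-style class visitor after this refactor.
    #include <cstddef>

    namespace mirror { class Class; }

    class ClassVisitorSketch {
     public:
      virtual ~ClassVisitorSketch() {}
      virtual bool operator()(mirror::Class* klass) = 0;
    };

    class CountClasses : public ClassVisitorSketch {
     public:
      bool operator()(mirror::Class* klass) override {
        (void)klass;
        ++count_;
        return true;  // true means "keep iterating"
      }
      size_t count_ = 0;
    };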
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index b3439f7643..c6fa15de8c 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -370,15 +370,17 @@ inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, i
}
}
-template<typename T>
+template<typename T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline T PointerArray::GetElementPtrSize(uint32_t idx, size_t ptr_size) {
  // C-style casts here, since T is sometimes a pointer and sometimes an
  // integer (for stack traces).
if (ptr_size == 8) {
- return (T)static_cast<uintptr_t>(AsLongArray()->GetWithoutChecks(idx));
+ return (T)static_cast<uintptr_t>(
+ AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
DCHECK_EQ(ptr_size, 4u);
- return (T)static_cast<uintptr_t>(AsIntArray()->GetWithoutChecks(idx));
+ return (T)static_cast<uintptr_t>(
+ AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
template<bool kTransactionActive, bool kUnchecked>
@@ -401,12 +403,12 @@ inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, size_t ptr
ptr_size);
}
-template <typename Visitor>
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void PointerArray::Fixup(mirror::PointerArray* dest,
size_t pointer_size,
const Visitor& visitor) {
for (size_t i = 0, count = GetLength(); i < count; ++i) {
- void* ptr = GetElementPtrSize<void*>(i, pointer_size);
+ void* ptr = GetElementPtrSize<void*, kVerifyFlags, kReadBarrierOption>(i, pointer_size);
void* new_ptr = visitor(ptr);
if (ptr != new_ptr) {
dest->SetElementPtrSize<false, true>(i, new_ptr, pointer_size);
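Fixup walks every pointer-sized slot, passes it through the visitor, and writes to dest only when the visitor relocates the pointer; the new template parameters let image-handling code run it without read barriers. A hedged usage sketch, where the delta relocation is purely illustrative, not the image writer's real logic:

    // Hedged usage sketch of PointerArray::Fixup after this change.
    #include <cstdint>

    inline void* Relocate(void* ptr, intptr_t delta) {
      return ptr == nullptr
          ? nullptr
          : reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr) + delta);
    }

    // src->Fixup<kVerifyNone, kWithoutReadBarrier>(
    //     dest, sizeof(void*),
    //     [delta](void* ptr) { return Relocate(ptr, delta); });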
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 2bd6c5b9a1..9a21ec255c 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -183,7 +183,9 @@ class MANAGED PrimitiveArray : public Array {
// Either an IntArray or a LongArray.
class PointerArray : public Array {
public:
- template<typename T>
+ template<typename T,
+ VerifyObjectFlags kVerifyFlags = kVerifyNone,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
T GetElementPtrSize(uint32_t idx, size_t ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -196,7 +198,9 @@ class PointerArray : public Array {
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
// to dest if visitor(source_ptr) != source_ptr.
- template <typename Visitor>
+ template <VerifyObjectFlags kVerifyFlags = kVerifyNone,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ typename Visitor>
void Fixup(mirror::PointerArray* dest, size_t pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index d5783c04b5..422832e03c 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -253,14 +253,16 @@ inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer
EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size));
}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) {
- DCHECK(ShouldHaveEmbeddedImtAndVTable());
+ DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
return GetFieldPtrWithSize<ArtMethod*>(
EmbeddedImTableEntryOffset(i, pointer_size), pointer_size);
}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
- DCHECK(ShouldHaveEmbeddedImtAndVTable());
+ DCHECK((ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()));
SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size);
}
@@ -538,10 +540,11 @@ inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
: ClassOffset();
}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedImtAndVTable()) {
+ if (ShouldHaveEmbeddedImtAndVTable<kVerifyFlags, kReadBarrierOption>()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(
true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
@@ -1057,7 +1060,7 @@ inline uint32_t Class::NumStaticFields() {
return arr != nullptr ? arr->size() : 0u;
}
-template <typename Visitor>
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void Class::FixupNativePointers(mirror::Class* dest,
size_t pointer_size,
const Visitor& visitor) {
@@ -1085,7 +1088,7 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
dest->SetDexCacheStrings(new_strings);
}
// Fix up embedded tables.
- if (!IsTemp() && ShouldHaveEmbeddedImtAndVTable()) {
+ if (!IsTemp() && ShouldHaveEmbeddedImtAndVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
ArtMethod* new_method = visitor(method);
@@ -1094,10 +1097,13 @@ inline void Class::FixupNativePointers(mirror::Class* dest,
}
}
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- ArtMethod* method = GetEmbeddedImTableEntry(i, pointer_size);
+ ArtMethod* method = GetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
+ pointer_size);
ArtMethod* new_method = visitor(method);
if (method != new_method) {
- dest->SetEmbeddedImTableEntry(i, new_method, pointer_size);
+ dest->SetEmbeddedImTableEntry<kVerifyFlags, kReadBarrierOption>(i,
+ new_method,
+ pointer_size);
}
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 8092db4fbf..79adfb65b1 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -449,7 +449,6 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-
bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -494,9 +493,11 @@ class MANAGED Class FINAL : public Object {
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
- (IsAbstract() && IsArrayClass());
+ (IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -815,8 +816,10 @@ class MANAGED Class FINAL : public Object {
return MemberOffset(sizeof(Class));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
- return IsInstantiable();
+ return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
}
bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -825,9 +828,13 @@ class MANAGED Class FINAL : public Object {
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1020,6 +1027,8 @@ class MANAGED Class FINAL : public Object {
}
// Get the offset of the first reference static field. Other reference static fields follow.
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1243,7 +1252,9 @@ class MANAGED Class FINAL : public Object {
// the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
// initialized to a copy of *this to prevent issues. Does not visit the ArtMethod and ArtField
// roots.
- template <typename Visitor>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ typename Visitor>
void FixupNativePointers(mirror::Class* dest, size_t pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 605deac608..d6571f211a 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -43,8 +43,11 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
return method_array;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
- auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
+ auto* method_array = down_cast<PointerArray*>(
+ Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
return method_array == nullptr ? 0u : method_array->GetLength();
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 760de9ab40..eb391be406 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -255,16 +255,17 @@ inline Class* Object::AsClass() {
return down_cast<Class*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsObjectArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- return IsArrayInstance<kVerifyFlags>() &&
- !GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitive();
+ return IsArrayInstance<kVerifyFlags, kReadBarrierOption>() &&
+ !GetClass<kNewFlags, kReadBarrierOption>()->
+ template GetComponentType<kNewFlags, kReadBarrierOption>()->IsPrimitive();
}
-template<class T, VerifyObjectFlags kVerifyFlags>
+template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ObjectArray<T>* Object::AsObjectArray() {
- DCHECK(IsObjectArray<kVerifyFlags>());
+ DCHECK((IsObjectArray<kVerifyFlags, kReadBarrierOption>()));
return down_cast<ObjectArray<T>*>(this);
}
@@ -274,14 +275,14 @@ inline bool Object::IsArrayInstance() {
template IsArrayClass<kVerifyFlags, kReadBarrierOption>();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsReferenceInstance() {
- return GetClass<kVerifyFlags>()->IsTypeOfReferenceClass();
+ return GetClass<kVerifyFlags, kReadBarrierOption>()->IsTypeOfReferenceClass();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline Reference* Object::AsReference() {
- DCHECK(IsReferenceInstance<kVerifyFlags>());
+ DCHECK((IsReferenceInstance<kVerifyFlags, kReadBarrierOption>()));
return down_cast<Reference*>(this);
}
@@ -341,29 +342,31 @@ inline ShortArray* Object::AsShortSizedArray() {
return down_cast<ShortArray*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline IntArray* Object::AsIntArray() {
- DCHECK(IsIntArray<kVerifyFlags>());
+ DCHECK((IsIntArray<kVerifyFlags, kReadBarrierOption>()));
return down_cast<IntArray*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline LongArray* Object::AsLongArray() {
- DCHECK(IsLongArray<kVerifyFlags>());
+ DCHECK((IsLongArray<kVerifyFlags, kReadBarrierOption>()));
return down_cast<LongArray*>(this);
}
@@ -1063,7 +1066,7 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
   // Presumably GC can happen while we are cross-compiling; doing the pointer-size
   // logic here should not cause performance problems.
MemberOffset field_offset = kIsStatic
- ? klass->GetFirstReferenceStaticFieldOffset(
+ ? klass->GetFirstReferenceStaticFieldOffset<kVerifyFlags, kReadBarrierOption>(
Runtime::Current()->GetClassLinker()->GetImagePointerSize())
: klass->GetFirstReferenceInstanceFieldOffset();
for (size_t i = 0u; i < num_reference_fields; ++i) {
@@ -1123,26 +1126,26 @@ inline void Object::VisitReferences(const Visitor& visitor,
visitor(this, ClassOffset(), false);
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
if (LIKELY(class_flags == kClassFlagNormal)) {
- DCHECK(!klass->IsVariableSize());
- VisitInstanceFieldsReferences(klass, visitor);
+ DCHECK((!klass->IsVariableSize<kVerifyFlags, kReadBarrierOption>()));
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
DCHECK((!klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
DCHECK(!klass->IsStringClass());
DCHECK(!klass->IsClassLoaderClass());
- DCHECK(!klass->IsArrayClass());
+ DCHECK((!klass->IsArrayClass<kVerifyFlags, kReadBarrierOption>()));
} else {
if ((class_flags & kClassFlagNoReferenceFields) == 0) {
DCHECK(!klass->IsStringClass());
if (class_flags == kClassFlagClass) {
- DCHECK(klass->IsClassClass());
- AsClass<kVerifyNone>()->VisitReferences<kVisitNativeRoots,
- kVerifyFlags,
- kReadBarrierOption>(klass, visitor);
+ DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
+ mirror::Class* as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+ as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
+ visitor);
} else if (class_flags == kClassFlagObjectArray) {
DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
+ AsObjectArray<mirror::Object, kVerifyNone, kReadBarrierOption>()->VisitReferences(visitor);
} else if ((class_flags & kClassFlagReference) != 0) {
- VisitInstanceFieldsReferences(klass, visitor);
- ref_visitor(klass, AsReference());
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+ ref_visitor(klass, AsReference<kVerifyFlags, kReadBarrierOption>());
} else if (class_flags == kClassFlagDexCache) {
mirror::DexCache* const dex_cache = AsDexCache<kVerifyFlags, kReadBarrierOption>();
dex_cache->VisitReferences<kVisitNativeRoots,
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index d635002d12..3f739df67b 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -159,9 +159,12 @@ class MANAGED LOCKABLE Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
- template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<class T,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -199,14 +202,18 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -230,9 +237,11 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index bd4a9c1031..12bfe38e17 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -27,14 +27,6 @@ inline uint32_t Reference::ClassSize(size_t pointer_size) {
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
-inline bool Reference::IsEnqueuable() {
- // Not using volatile reads as an optimization since this is only called with all the mutators
- // suspended.
- const Object* queue = GetFieldObject<mirror::Object>(QueueOffset());
- const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset());
- return queue != nullptr && queue_next == nullptr;
-}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 5e467ab94a..3baa12e40b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -75,9 +75,7 @@ class MANAGED Reference : public Object {
void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
- // Volatile read/write is not necessary since the java pending next is only accessed from
- // the java threads for cleared references. Once these cleared references have a null referent,
- // we never end up reading their pending next from the GC again.
+
Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetFieldObject<Reference>(PendingNextOffset());
}
@@ -91,14 +89,22 @@ class MANAGED Reference : public Object {
}
}
- bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) {
- // Since the references are stored as cyclic lists it means that once enqueued, the pending
- // next is always non-null.
- return GetPendingNext() != nullptr;
+ // Returns true if the reference's pendingNext is null, indicating it is
+ // okay to process this reference.
+ //
+ // If pendingNext is not null, then one of the following cases holds:
+ // 1. The reference has already been enqueued to a java ReferenceQueue. In
+ // this case the referent should not be considered for reference processing
+ // ever again.
+ // 2. The reference is currently part of a list of references that may
+ // shortly be enqueued on a java ReferenceQueue. In this case the reference
+ // should not be processed again until and unless the reference has been
+ // removed from the list after having determined the reference is not ready
+ // to be enqueued on a java ReferenceQueue.
+ bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetPendingNext() == nullptr;
}
- bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_);
-
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
@@ -115,9 +121,9 @@ class MANAGED Reference : public Object {
}
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- HeapReference<Reference> pending_next_; // Note this is Java volatile:
- HeapReference<Object> queue_; // Note this is Java volatile:
- HeapReference<Reference> queue_next_; // Note this is Java volatile:
+ HeapReference<Reference> pending_next_;
+ HeapReference<Object> queue_;
+ HeapReference<Reference> queue_next_;
HeapReference<Object> referent_; // Note this is Java volatile:
static GcRoot<Class> java_lang_ref_Reference_;
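With IsEnqueued and IsEnqueuable gone, reference processing keys entirely off pendingNext as the new comment describes: null means "safe to consider", and enqueuing onto a cyclic list makes it permanently non-null. A hedged sketch of that invariant (illustrative types, not the GC's real queues):

    // Hedged sketch of the pendingNext invariant documented above; the type
    // and the enqueue helper are illustrative stand-ins.
    struct ReferenceSketch {
      ReferenceSketch* pending_next = nullptr;
      bool IsUnprocessed() const { return pending_next == nullptr; }
    };

    inline void EnqueueIfUnprocessed(ReferenceSketch* ref, ReferenceSketch*& head) {
      if (ref->IsUnprocessed()) {
        // Lists are cyclic, so pending_next is non-null from here on, which is
        // exactly what makes IsUnprocessed() return false afterwards.
        ref->pending_next = (head != nullptr) ? head : ref;
        head = ref;
      }
    }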
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 17306c9842..6b84c8faa2 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -108,7 +108,7 @@ bool InlineMethodAnalyser::AnalyseMethodCode(const DexFile::CodeItem* code_item,
switch (opcode) {
case Instruction::RETURN_VOID:
- if (method != nullptr) {
+ if (result != nullptr) {
result->opcode = kInlineOpNop;
result->flags = kInlineSpecial;
result->d.data = 0u;
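The one-line fix above matters because, judging from the shape of the code, AnalyseMethodCode can be called with a null result when the caller only wants a yes/no inlinability verdict; the guard must therefore test the object actually being written, not the method. A hedged sketch of the corrected pattern:

    // Hedged sketch of the corrected guard; InlineMethodSketch mirrors the
    // fields used above, and the constants are placeholders.
    #include <cstdint>

    struct InlineMethodSketch {
      uint32_t opcode;
      uint32_t flags;
      struct { uint64_t data; } d;
    };

    inline bool RecordReturnVoid(InlineMethodSketch* result) {
      if (result != nullptr) {   // guard the pointer we dereference, not `method`
        result->opcode = 0u;     // placeholder for kInlineOpNop
        result->flags = 0u;      // placeholder for kInlineSpecial
        result->d.data = 0u;
      }
      return true;               // the method is inlinable either way
    }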
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 19cf7593e4..0c3eb3b628 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -32,50 +32,61 @@ template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlway
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (with_read_barrier && kUseBakerReadBarrier) {
- // The higher bits of the rb_ptr, rb_ptr_high_bits (must be zero)
- // is used to create artificial data dependency from the is_gray
- // load to the ref field (ptr) load to avoid needing a load-load
- // barrier between the two.
- uintptr_t rb_ptr_high_bits;
- bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
- ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
- rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
- MirrorType* ref = ref_addr->AsMirrorPtr();
- MirrorType* old_ref = ref;
- if (is_gray) {
- // Slow-path.
- ref = reinterpret_cast<MirrorType*>(Mark(ref));
- // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
- // updates before us, but it's ok.
- if (kAlwaysUpdateField && ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
- offset, old_ref, ref);
+ if (kUseReadBarrier && with_read_barrier) {
+ if (kIsDebugBuild) {
+ Thread* const self = Thread::Current();
+ if (self != nullptr) {
+ CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
}
}
- if (kEnableReadBarrierInvariantChecks) {
- CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
- }
- AssertToSpaceInvariant(obj, offset, ref);
- return ref;
- } else if (with_read_barrier && kUseBrooksReadBarrier) {
- // To be implemented.
- return ref_addr->AsMirrorPtr();
- } else if (with_read_barrier && kUseTableLookupReadBarrier) {
- MirrorType* ref = ref_addr->AsMirrorPtr();
- MirrorType* old_ref = ref;
- // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
- gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
- ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
- // Update the field atomically. This may fail if mutator updates before us, but it's ok.
- if (ref != old_ref) {
- obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
- offset, old_ref, ref);
+ if (kUseBakerReadBarrier) {
+      // The higher bits of the rb_ptr, rb_ptr_high_bits (which must be zero),
+      // are used to create an artificial data dependency from the is_gray
+      // load to the ref field (ptr) load, so that no load-load barrier is
+      // needed between the two.
+ uintptr_t rb_ptr_high_bits;
+ bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
+ ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
+ rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
+ MirrorType* ref = ref_addr->AsMirrorPtr();
+ MirrorType* old_ref = ref;
+ if (is_gray) {
+ // Slow-path.
+ ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
+ // updates before us, but it's ok.
+ if (kAlwaysUpdateField && ref != old_ref) {
+ obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ offset, old_ref, ref);
+ }
+ }
+ if (kEnableReadBarrierInvariantChecks) {
+ CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
}
+ AssertToSpaceInvariant(obj, offset, ref);
+ return ref;
+ } else if (kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref_addr->AsMirrorPtr();
+ } else if (kUseTableLookupReadBarrier) {
+ MirrorType* ref = ref_addr->AsMirrorPtr();
+ MirrorType* old_ref = ref;
+ // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
+ ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
+ // Update the field atomically. This may fail if mutator updates before us, but it's ok.
+ if (ref != old_ref) {
+ obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ offset, old_ref, ref);
+ }
+ }
+ AssertToSpaceInvariant(obj, offset, ref);
+ return ref;
+ } else {
+ LOG(FATAL) << "Unexpected read barrier type";
+ UNREACHABLE();
}
- AssertToSpaceInvariant(obj, offset, ref);
- return ref;
} else {
// No read barrier.
return ref_addr->AsMirrorPtr();
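The Baker fast path above orders its two loads with an address dependency instead of a fence: the read barrier pointer's high bits are provably zero, so OR-ing them into the field address leaves it unchanged while making the field load data-dependent on the gray-bit load. A minimal hedged illustration of the idiom:

    // Hedged illustration of the address-dependency idiom; names are
    // illustrative. high_bits is always zero, so the returned address equals
    // field_addr, but the CPU must complete the load that produced high_bits
    // before dereferencing the result -- no load-load fence needed.
    #include <cstdint>

    inline uint32_t* DependentAddr(uintptr_t high_bits /* always 0 */,
                                   uint32_t* field_addr) {
      return reinterpret_cast<uint32_t*>(
          high_bits | reinterpret_cast<uintptr_t>(field_addr));
    }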
@@ -87,32 +98,43 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source) {
MirrorType* ref = *root;
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (with_read_barrier && kUseBakerReadBarrier) {
- // TODO: separate the read barrier code from the collector code more.
- Thread* self = Thread::Current();
- if (self != nullptr && self->GetIsGcMarking()) {
- ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ if (kUseReadBarrier && with_read_barrier) {
+ if (kIsDebugBuild) {
+ Thread* const self = Thread::Current();
+ if (self != nullptr) {
+ CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
+ }
}
- AssertToSpaceInvariant(gc_root_source, ref);
- return ref;
- } else if (with_read_barrier && kUseBrooksReadBarrier) {
- // To be implemented.
- return ref;
- } else if (with_read_barrier && kUseTableLookupReadBarrier) {
- Thread* self = Thread::Current();
- if (self != nullptr &&
- self->GetIsGcMarking() &&
- Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
- MirrorType* old_ref = ref;
- ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
- // Update the field atomically. This may fail if mutator updates before us, but it's ok.
- if (ref != old_ref) {
- Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
+ if (kUseBakerReadBarrier) {
+ // TODO: separate the read barrier code from the collector code more.
+ Thread* self = Thread::Current();
+ if (self != nullptr && self->GetIsGcMarking()) {
+ ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ }
+ AssertToSpaceInvariant(gc_root_source, ref);
+ return ref;
+ } else if (kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else if (kUseTableLookupReadBarrier) {
+ Thread* self = Thread::Current();
+ if (self != nullptr &&
+ self->GetIsGcMarking() &&
+ Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+ MirrorType* old_ref = ref;
+ ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
+ // Update the field atomically. This may fail if mutator updates before us, but it's ok.
+ if (ref != old_ref) {
+ Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+ atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
+ }
}
+ AssertToSpaceInvariant(gc_root_source, ref);
+ return ref;
+ } else {
+ LOG(FATAL) << "Unexpected read barrier type";
+ UNREACHABLE();
}
- AssertToSpaceInvariant(gc_root_source, ref);
- return ref;
} else {
return ref;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 21241d240b..2abcd67c2d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2704,7 +2704,7 @@ class ReferenceMapVisitor : public StackVisitor {
// Visiting the declaring class is necessary so that we don't unload the class of a method that
// is executing. We need to ensure that the code stays mapped.
void VisitDeclaringClass(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::Class* klass = method->GetDeclaringClassNoBarrier();
+ mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
if (klass != nullptr) {
mirror::Object* new_ref = klass;
diff --git a/runtime/thread.h b/runtime/thread.h
index b25bcb29bf..d7887ca42f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1067,6 +1067,14 @@ class Thread {
void InitStringEntryPoints();
+ void ModifyDebugDisallowReadBarrier(int8_t delta) {
+ debug_disallow_read_barrier_ += delta;
+ }
+
+ uint8_t GetDebugDisallowReadBarrierCount() const {
+ return debug_disallow_read_barrier_;
+ }
+
private:
explicit Thread(bool daemon);
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
@@ -1446,6 +1454,9 @@ class Thread {
// Thread "interrupted" status; stays raised until queried or thrown.
bool interrupted_ GUARDED_BY(wait_mutex_);
+  // Debug disallow-read-barrier count; it is only checked in debug builds, and only in the runtime.
+ uint8_t debug_disallow_read_barrier_ = 0;
+
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
@@ -1493,6 +1504,20 @@ class ScopedStackedShadowFramePusher {
DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
+// Only has an effect in debug builds, where the disallow count is checked.
+class ScopedDebugDisallowReadBarriers {
+ public:
+ explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
+ self_->ModifyDebugDisallowReadBarrier(1);
+ }
+ ~ScopedDebugDisallowReadBarriers() {
+ self_->ModifyDebugDisallowReadBarrier(-1);
+ }
+
+ private:
+ Thread* const self_;
+};
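The scoped guard pairs with the CHECK_EQ added in read_barrier-inl.h: while an instance is live on a thread, any read barrier taken by that thread aborts a debug build. A hedged usage sketch (the caller is illustrative, not an existing runtime function):

    // Hedged usage sketch of ScopedDebugDisallowReadBarriers.
    void VisitWithoutBarriers(Thread* self) {
      ScopedDebugDisallowReadBarriers sddrb(self);
      // Any ReadBarrier::Barrier / BarrierForRoot taken on this thread while
      // sddrb is live now trips CHECK_EQ(GetDebugDisallowReadBarrierCount(), 0u)
      // in debug builds.
      // ... raw, barrier-free visiting work goes here ...
    }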
+
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index f6ee6a2b1a..6922564fb2 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -31,14 +31,14 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
- methods_offset_(types_offset_ +
- RoundUp(TypesSize(header.type_ids_size_), MethodsAlignment())),
- strings_offset_(methods_offset_ +
- RoundUp(MethodsSize(header.method_ids_size_), StringsAlignment())),
- fields_offset_(strings_offset_ +
- RoundUp(StringsSize(header.string_ids_size_), FieldsAlignment())),
- size_(fields_offset_ +
- RoundUp(FieldsSize(header.field_ids_size_), Alignment())) {
+ methods_offset_(
+ RoundUp(types_offset_ + TypesSize(header.type_ids_size_), MethodsAlignment())),
+ strings_offset_(
+ RoundUp(methods_offset_ + MethodsSize(header.method_ids_size_), StringsAlignment())),
+ fields_offset_(
+ RoundUp(strings_offset_ + StringsSize(header.string_ids_size_), FieldsAlignment())),
+ size_(
+ RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())) {
DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
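The reordering above fixes a genuine alignment bug: rounding a section's size and then adding it to the running offset only yields an aligned offset when the base is itself aligned to the next section's alignment, whereas rounding the sum always does. A hedged numeric illustration (sizes and alignments invented for the example):

    // Hedged numeric illustration of the alignment fix; all values invented.
    #include <cstddef>

    constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) / n * n; }

    constexpr size_t methods_offset = 4;    // suppose the previous section ends here
    constexpr size_t methods_size   = 20;
    constexpr size_t strings_align  = 8;

    // Old formula: align the size, then add -- misaligned when the base is not
    // itself aligned.
    constexpr size_t old_strings =
        methods_offset + RoundUpTo(methods_size, strings_align);
    static_assert(old_strings == 28, "4 + 24 = 28, not 8-aligned");

    // New formula: add, then align the sum -- always aligned.
    constexpr size_t new_strings =
        RoundUpTo(methods_offset + methods_size, strings_align);
    static_assert(new_strings == 24, "RoundUp(24, 8) = 24, 8-aligned");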