Remove intermediate mterp files from the repository.
Generate them in the build system properly.
Bug: 33375800
Test: check that the generated files are still identical
Change-Id: I6058c2d8d28586414f28dc024a3c0199d993552b
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 1cebb06..15ccb70 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -246,7 +246,7 @@
arm: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_arm.S",
+ ":libart_mterp.arm",
"arch/arm/context_arm.cc",
"arch/arm/entrypoints_init_arm.cc",
"arch/arm/instruction_set_features_assembly_tests.S",
@@ -261,7 +261,7 @@
arm64: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_arm64.S",
+ ":libart_mterp.arm64",
"arch/arm64/context_arm64.cc",
"arch/arm64/entrypoints_init_arm64.cc",
"arch/arm64/jni_entrypoints_arm64.S",
@@ -275,7 +275,7 @@
x86: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_x86.S",
+ ":libart_mterp.x86",
"arch/x86/context_x86.cc",
"arch/x86/entrypoints_init_x86.cc",
"arch/x86/jni_entrypoints_x86.S",
@@ -290,7 +290,7 @@
// Note that the fault_handler_x86.cc is not a mistake. This file is
// shared between the x86 and x86_64 architectures.
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_x86_64.S",
+ ":libart_mterp.x86_64",
"arch/x86_64/context_x86_64.cc",
"arch/x86_64/entrypoints_init_x86_64.cc",
"arch/x86_64/jni_entrypoints_x86_64.S",
@@ -304,7 +304,7 @@
mips: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_mips.S",
+ ":libart_mterp.mips",
"arch/mips/context_mips.cc",
"arch/mips/entrypoints_init_mips.cc",
"arch/mips/jni_entrypoints_mips.S",
@@ -317,7 +317,7 @@
mips64: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_mips64.S",
+ ":libart_mterp.mips64",
"arch/mips64/context_mips64.cc",
"arch/mips64/entrypoints_init_mips64.cc",
"arch/mips64/jni_entrypoints_mips64.S",
@@ -669,3 +669,51 @@
host_supported: true,
export_include_dirs: ["."],
}
+
+genrule {
+ name: "libart_mterp.arm",
+ out: ["mterp_arm.S"],
+ srcs: ["interpreter/mterp/arm/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.arm64",
+ out: ["mterp_arm64.S"],
+ srcs: ["interpreter/mterp/arm64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.mips",
+ out: ["mterp_mips.S"],
+ srcs: ["interpreter/mterp/mips/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.mips64",
+ out: ["mterp_mips64.S"],
+ srcs: ["interpreter/mterp/mips64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.x86",
+ out: ["mterp_x86.S"],
+ srcs: ["interpreter/mterp/x86/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.x86_64",
+ out: ["mterp_x86_64.S"],
+ srcs: ["interpreter/mterp/x86_64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
diff --git a/runtime/interpreter/mterp/common/gen_setup.py b/runtime/interpreter/mterp/common/gen_setup.py
index 48417e8..5790a94 100644
--- a/runtime/interpreter/mterp/common/gen_setup.py
+++ b/runtime/interpreter/mterp/common/gen_setup.py
@@ -46,7 +46,7 @@
write_line("")
opnum, opcode = None, None
-def generate():
+def generate(output_filename):
out.seek(0)
out.truncate()
write_line("/* DO NOT EDIT: This file was generated by gen-mterp.py. */")
@@ -70,6 +70,6 @@
out.seek(0)
# Squash consequtive empty lines.
text = re.sub(r"(\n\n)(\n)+", r"\1", out.read())
- with open('out/mterp_' + arch + '.S', 'w') as output_file:
+ with open(output_filename, 'w') as output_file:
output_file.write(text)
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index 98a98fc..ad6e836 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -14,14 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import sys, re
-from os import listdir
+import sys, re, os
from cStringIO import StringIO
+SCRIPT_DIR = os.path.dirname(sys.argv[0])
# This file is included verbatim at the start of the in-memory python script.
-SCRIPT_SETUP_CODE = "common/gen_setup.py"
-
-INTERP_DEFS_FILE = "../../../libdexfile/dex/dex_instruction_list.h" # need opcode list
+SCRIPT_SETUP_CODE = os.path.join(SCRIPT_DIR, "common/gen_setup.py")
+INTERP_DEFS_FILE = os.path.join(SCRIPT_DIR, "../../../libdexfile/dex/dex_instruction_list.h")
NUM_PACKED_OPCODES = 256
# Extract an ordered list of instructions from the VM sources. We use the
@@ -54,23 +53,20 @@
(?(1)\}) # Expect } if and only if { was present.
''', re.VERBOSE)
-def generate_script(arch, setup_code):
+def generate_script(output_filename, input_filenames):
# Create new python script and write the initial setup code.
script = StringIO() # File-like in-memory buffer.
script.write("# DO NOT EDIT: This file was generated by gen-mterp.py.\n")
- script.write('arch = "' + arch + '"\n')
- script.write(setup_code)
- opcodes = getOpcodeList()
+ script.write(open(SCRIPT_SETUP_CODE, "r").read())
script.write("def opcodes(is_alt):\n")
- for i in xrange(NUM_PACKED_OPCODES):
- script.write(' write_opcode({0}, "{1}", {1}, is_alt)\n'.format(i, opcodes[i]))
+ for i, opcode in enumerate(getOpcodeList()):
+ script.write(' write_opcode({0}, "{1}", {1}, is_alt)\n'.format(i, opcode))
- # Find all template files and translate them into python code.
- files = listdir(arch)
- for file in sorted(files):
- f = open(arch + "/" + file, "r")
+ # Read all template files and translate them into python code.
+ for input_filename in sorted(input_filenames):
+ lines = open(input_filename, "r").readlines()
indent = ""
- for line in f.readlines():
+ for line in lines:
line = line.rstrip()
if line.startswith("%"):
script.write(line.lstrip("%") + "\n")
@@ -83,16 +79,20 @@
line = line.replace("$$", "$")
script.write(indent + "write_line('''" + line + "''')\n")
script.write("\n")
- f.close()
- script.write('generate()\n')
+ script.write("generate('''" + output_filename + "''')\n")
script.seek(0)
return script.read()
-# Generate the script for each architecture and execute it.
-for arch in ["arm", "arm64", "mips", "mips64", "x86", "x86_64"]:
- with open(SCRIPT_SETUP_CODE, "r") as setup_code_file:
- script = generate_script(arch, setup_code_file.read())
- filename = "out/mterp_" + arch + ".py" # Name to report in error messages.
- # open(filename, "w").write(script) # Write the script to disk for debugging.
- exec(compile(script, filename, mode='exec'))
+if len(sys.argv) <= 2:
+ print("Usage: %s output_file input_file(s)" % sys.argv[0])
+ sys.exit(1)
+
+# Generate the script and execute it.
+output_filename = sys.argv[1]
+input_filenames = sys.argv[2:]
+script_filename = output_filename + ".py"
+script = generate_script(output_filename, input_filenames)
+with open(script_filename, "w") as script_file:
+ script_file.write(script) # Write to disk for debugging.
+exec(compile(script, script_filename, mode='exec'))
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
deleted file mode 100644
index 6e2b1ac..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ /dev/null
@@ -1,11424 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them. If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending". Only the arguments that don't fit in the first 4
-registers are placed on the stack. "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- r4 rPC interpreted program counter, used for fetching instructions
- r5 rFP interpreted frame pointer, used for accessing locals and args
- r6 rSELF self (Thread) pointer
- r7 rINST first 16-bit code unit of current instruction
- r8 rIBASE interpreted instruction base pointer, used for computed goto
- r10 rPROFILE branch profiling countdown
- r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC r4
-#define CFI_DEX 4 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define rFP r5
-#define rSELF r6
-#define rINST r7
-#define rIBASE r8
-#define rPROFILE r10
-#define rREFS r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- sub \tmp, rPC, \tmp
- asr \tmp, #1
- str \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- ldrh rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- add rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg. Updates
- * rPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
- ldrh rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
- add pc, rIBASE, \reg, lsl #7
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add pc, \base, \reg, lsl #7
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [rFP, \vreg, lsl #2]
- mov \reg, #0
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [rFP, \vreg, lsl #2]
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
- mov \tmp1, #0
- add \tmp2, \vreg, #1
- SET_VREG_SHADOW \tmp1, \vreg
- SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .arm
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
-.endm
-
-.macro END name
- .fnend
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
-
-/*
- * On entry:
- * r0 Thread* self/
- * r1 insns_
- * r2 ShadowFrame
- * r3 JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
- stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r3, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r6, 12
- .cfi_rel_offset r7, 16
- .cfi_rel_offset r8, 20
- .cfi_rel_offset r9, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset fp, 32
- .cfi_rel_offset lr, 36
-
- /* Remember the return register */
- str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov rSELF, r0
- ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
- VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
- ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- bl MterpSetUpHotnessCountdown
- mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST @ load rINST from rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
- /* NOTE: no fallthrough */
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- GOTO_OPCODE ip @ execute it
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 1 @ r3<- BBBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 2 @ r3<- BBBB
- FETCH r2, 1 @ r2<- AAAA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* move-result-wide vAA */
- mov rINST, rINST, lsr #8 @ rINST<- AA
- ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- retval.j
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r1, #0 @ r1<- 0
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- SET_VREG r1, r0 @ fp[A]<- r1
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, #+BBBBbbbb */
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r3 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, r0, lsl #16 @ r0<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r1, r0, asr #31 @ r1<- ssssssss
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH r0, 1 @ r0<- 0000bbbb (low)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_S r2, 2 @ r2<- ssssBBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- mov r1, r0, asr #31 @ r1<- ssssssss
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (low middle)
- FETCH r2, 3 @ r2<- hhhh (high middle)
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
- FETCH r3, 4 @ r3<- HHHH (high)
- mov r9, rINST, lsr #8 @ r9<- AA
- orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
- FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, #0 @ r0<- 00000000
- mov r1, r1, lsl #16 @ r1<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r2, 2 @ r2<- BBBB (high)
- mov r1, rINST, lsr #8 @ r1<- AA
- orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 @ advance rPC
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 3 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artLockObjectFromCode
- cmp r0, #0
- bne MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r0<- self
- bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
- cmp r0, #0 @ failed?
- bne MterpException
- FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpCheckCast @ (index, &obj, method, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- FETCH r0, 1 @ r0<- CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpInstanceOf @ (index, &obj, method, self)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
- PREFETCH_INST 2
- cmp r1, #0 @ exception pending?
- bne MterpException
- ADVANCE 2 @ advance rPC
- SET_VREG r0, r9 @ vA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
- /*
- * Return the length of an array.
- */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r0, r1 @ r0<- vB (object ref)
- cmp r0, #0 @ is object null?
- beq common_errNullObject @ yup, fail
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r3, r2 @ vB<- length
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rSELF
- mov r2, rINST
- bl MterpNewInstance @ (shadow_frame, self, inst_data)
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl MterpFilledNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl MterpFilledNewArrayRange
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
- GET_VREG r0, r3 @ r0<- vAA (array object)
- add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
- bl MterpFillArrayData @ (obj, payload)
- cmp r0, #0 @ 0 means an exception is thrown
- beq MterpPossibleException @ exception?
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r1, r2 @ r1<- vAA (exception object)
- cmp r1, #0 @ null object?
- beq common_errNullObject @ yes, throw an NPE instead
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH r0, 1 @ r0<- aaaa (lo)
- FETCH r3, 2 @ r1<- AAAA (hi)
- orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa
- b MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl MterpDoPackedSwitch @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl MterpDoSparseSwitch @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
- /*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- cmp r0, r2
- sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
- mov ip, #0
- mvnlt ip, #0 @ -1
- cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
- orrne ip, #1
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG ip, r9 @ vAA<- ip
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- beq MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bne MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- blt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bge MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bgt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- ble MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- beq MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- bne MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- blt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- bge MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- bgt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- ble MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldr r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- bl artAGetObjectFromMterp @ (array, index)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- PREFETCH_INST 2
- cmp r1, #0
- bne MterpException
- SET_VREG_OBJECT r0, r9
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrsb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrsh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
- GET_INST_OPCODE ip @ extract opcode from rINST
- strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpAputObject
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU32
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetU32
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU64
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetU64
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetObj
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetObj
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetU8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetI8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetU16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIGetI16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU32
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutU32
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU64
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutU64
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutObj
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutObj
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutU8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutI8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutU16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpIPutI16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU32
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetU32
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU64
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetU64
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetObj
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetObj
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetU8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetI8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetU16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSGetI16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU32
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutU32
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU64
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutU64
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutObj
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutObj
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutU8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI8
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutI8
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutU16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI16
- mov r0, rPC @ arg0: Instruction* inst
- mov r1, rINST @ arg1: uint16_t inst_data
- add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
- mov r3, rSELF @ arg3: Thread* self
- PREFETCH_INST 2 @ prefetch next opcode
- bl MterpSPutI16
- cmp r0, #0
- beq MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtual
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeSuper
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeDirect
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeStatic
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeInterface
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeSuperRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeDirectRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeStaticRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeInterfaceRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- rsb r0, r0, #0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mvn r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- rsbs r0, r0, #0 @ optional op; may set condition codes
- rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mvn r0, r0 @ optional op; may set condition codes
- mvn r1, r1 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitos s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s1, [r9] @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitod d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
- /*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0/r1", where
- * "result" is a 32-bit quantity in r0.
- *
- * For: long-to-float, double-to-int, double-to-float
- *
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for op_move.)
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_l2f @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
- /*
- * Specialised 64-bit floating point operation.
- *
- * Note: The result will be returned in d2.
- *
- * For: long-to-double
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- vldr d0, [r3] @ d0<- vAA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
- vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
- vldr d3, constvalop_long_to_double
- vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
-
- GET_INST_OPCODE ip @ extract opcode from rINST
- vstr.64 d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
- /* literal pool helper */
-constvalop_long_to_double:
- .8byte 0x41f0000000000000
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizs s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s1, [r9] @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- bl f2l_doconv @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f64.f32 d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizd s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s0, [r9] @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl d2l_doconv @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f32.f64 s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s0, [r9] @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- sxtb r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- uxth r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- sxth r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- sub r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- adds r0, r0, r2 @ optional op; may set condition codes
- adc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- subs r0, r0, r2 @ optional op; may set condition codes
- sbc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
- /*
- * Signed 64-bit integer multiply.
- *
- * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
- * WX
- * x YZ
- * --------
- * ZW ZX
- * YW YX
- *
- * The low word of the result holds ZX, the high word holds
- * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
- * it doesn't fit in the low 64 bits.
- *
- * Unlike most ARM math operations, multiply instructions have
- * restrictions on using the same register more than once (Rd and Rm
- * cannot be the same).
- */
- /* mul-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST, lsr #8 @ r0<- AA
- add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
- CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2 } @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r0, r0, r2 @ optional op; may set condition codes
- and r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- orr r0, r0, r2 @ optional op; may set condition codes
- orr r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- eor r0, r0, r2 @ optional op; may set condition codes
- eor r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r0<- r0 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r0<- r0 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fadds s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fsubs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fmuls s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fdivs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* EABI doesn't define a float remainder function, but libm does */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmodf @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- faddd d2, d0, d1 @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fsubd d2, d0, d1 @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fmuld d2, d0, d1 @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fdivd d2, d0, d1 @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* EABI doesn't define a double remainder function, but libm does */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- sub r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- adds r0, r0, r2 @ optional op; may set condition codes
- adc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- subs r0, r0, r2 @ optional op; may set condition codes
- sbc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
- /*
- * Signed 64-bit integer multiply, "/2addr" version.
- *
- * See op_mul_long for an explanation.
- *
- * We get a little tight on registers, so to avoid looking up &fp[A]
- * again we stuff it into rINST.
- */
- /* mul-long/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST @ r0<- &fp[A] (free up rINST)
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r0, r0, r2 @ optional op; may set condition codes
- and r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- orr r0, r0, r2 @ optional op; may set condition codes
- orr r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- eor r0, r0, r2 @ optional op; may set condition codes
- eor r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fadds s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fsubs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fmuls s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fdivs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* EABI doesn't define a float remainder function, but libm does */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- bl fmodf @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- faddd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fsubd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fmuld d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fdivd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* EABI doesn't define a double remainder function, but libm does */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- rsb r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- add r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- rsb r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- asr r1, r3, #8 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- and r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- orr r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- eor r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldr r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* iget-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH ip, 1 @ ip<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
- CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- object we're operating on
- bl artIGetObjectFromMterp @ (obj, offset)
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- str r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r3, 1 @ r3<- field byte offset
- GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
- cmp r2, #0 @ check object for null
- beq common_errNullObject @ object was null
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
- ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strd r0, [r2, r3] @ obj.field<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpIputObjectQuick
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualQuick
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualQuickRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strb r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strb r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strh r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strh r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrb r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsb r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrh r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsh r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokePolymorphic
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokePolymorphicRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeCustom
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle an invoke-custom invocation.
- *
- * for: invoke-custom, invoke-custom/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeCustomRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstMethodHandle @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstMethodType @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
- .balign 128
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
- cmp r2, #0xbe @ MININT < x > MAXINT?
- bhs f2l_special_cases
- b __aeabi_f2lz @ tail call to convert float to long
-f2l_special_cases:
- cmp r2, #0xff @ NaN or infinity?
- beq f2l_maybeNaN
-f2l_notNaN:
- adds r0, r0, r0 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-f2l_maybeNaN:
- lsls r3, r0, #9
- beq f2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
- movw r3, #0x43e
- cmp r2, r3 @ MINLONG < x > MAXLONG?
- bhs d2l_special_cases
- b __aeabi_d2lz @ tail call to convert double to long
-d2l_special_cases:
- movw r3, #0x7ff
- cmp r2, r3
- beq d2l_maybeNaN @ NaN?
-d2l_notNaN:
- adds r1, r1, r1 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-d2l_maybeNaN:
- orrs r3, r0, r1, lsl #12
- beq d2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_nop
- sub lr, lr, #(.L_ALT_op_nop - .L_op_nop) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move
- sub lr, lr, #(.L_ALT_op_move - .L_op_move) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_from16
- sub lr, lr, #(.L_ALT_op_move_from16 - .L_op_move_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_16
- sub lr, lr, #(.L_ALT_op_move_16 - .L_op_move_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide
- sub lr, lr, #(.L_ALT_op_move_wide - .L_op_move_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide_from16
- sub lr, lr, #(.L_ALT_op_move_wide_from16 - .L_op_move_wide_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide_16
- sub lr, lr, #(.L_ALT_op_move_wide_16 - .L_op_move_wide_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object
- sub lr, lr, #(.L_ALT_op_move_object - .L_op_move_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object_from16
- sub lr, lr, #(.L_ALT_op_move_object_from16 - .L_op_move_object_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object_16
- sub lr, lr, #(.L_ALT_op_move_object_16 - .L_op_move_object_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result
- sub lr, lr, #(.L_ALT_op_move_result - .L_op_move_result) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result_wide
- sub lr, lr, #(.L_ALT_op_move_result_wide - .L_op_move_result_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result_object
- sub lr, lr, #(.L_ALT_op_move_result_object - .L_op_move_result_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_exception
- sub lr, lr, #(.L_ALT_op_move_exception - .L_op_move_exception) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_void
- sub lr, lr, #(.L_ALT_op_return_void - .L_op_return_void) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return
- sub lr, lr, #(.L_ALT_op_return - .L_op_return) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_wide
- sub lr, lr, #(.L_ALT_op_return_wide - .L_op_return_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_object
- sub lr, lr, #(.L_ALT_op_return_object - .L_op_return_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_4
- sub lr, lr, #(.L_ALT_op_const_4 - .L_op_const_4) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_16
- sub lr, lr, #(.L_ALT_op_const_16 - .L_op_const_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const
- sub lr, lr, #(.L_ALT_op_const - .L_op_const) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_high16
- sub lr, lr, #(.L_ALT_op_const_high16 - .L_op_const_high16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_16
- sub lr, lr, #(.L_ALT_op_const_wide_16 - .L_op_const_wide_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_32
- sub lr, lr, #(.L_ALT_op_const_wide_32 - .L_op_const_wide_32) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide
- sub lr, lr, #(.L_ALT_op_const_wide - .L_op_const_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_high16
- sub lr, lr, #(.L_ALT_op_const_wide_high16 - .L_op_const_wide_high16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_string
- sub lr, lr, #(.L_ALT_op_const_string - .L_op_const_string) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_string_jumbo
- sub lr, lr, #(.L_ALT_op_const_string_jumbo - .L_op_const_string_jumbo) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_class
- sub lr, lr, #(.L_ALT_op_const_class - .L_op_const_class) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_monitor_enter
- sub lr, lr, #(.L_ALT_op_monitor_enter - .L_op_monitor_enter) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_monitor_exit
- sub lr, lr, #(.L_ALT_op_monitor_exit - .L_op_monitor_exit) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_check_cast
- sub lr, lr, #(.L_ALT_op_check_cast - .L_op_check_cast) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_instance_of
- sub lr, lr, #(.L_ALT_op_instance_of - .L_op_instance_of) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_array_length
- sub lr, lr, #(.L_ALT_op_array_length - .L_op_array_length) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_new_instance
- sub lr, lr, #(.L_ALT_op_new_instance - .L_op_new_instance) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_new_array
- sub lr, lr, #(.L_ALT_op_new_array - .L_op_new_array) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_filled_new_array
- sub lr, lr, #(.L_ALT_op_filled_new_array - .L_op_filled_new_array) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_filled_new_array_range
- sub lr, lr, #(.L_ALT_op_filled_new_array_range - .L_op_filled_new_array_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_fill_array_data
- sub lr, lr, #(.L_ALT_op_fill_array_data - .L_op_fill_array_data) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_throw
- sub lr, lr, #(.L_ALT_op_throw - .L_op_throw) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto
- sub lr, lr, #(.L_ALT_op_goto - .L_op_goto) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto_16
- sub lr, lr, #(.L_ALT_op_goto_16 - .L_op_goto_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto_32
- sub lr, lr, #(.L_ALT_op_goto_32 - .L_op_goto_32) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_packed_switch
- sub lr, lr, #(.L_ALT_op_packed_switch - .L_op_packed_switch) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sparse_switch
- sub lr, lr, #(.L_ALT_op_sparse_switch - .L_op_sparse_switch) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpl_float
- sub lr, lr, #(.L_ALT_op_cmpl_float - .L_op_cmpl_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpg_float
- sub lr, lr, #(.L_ALT_op_cmpg_float - .L_op_cmpg_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpl_double
- sub lr, lr, #(.L_ALT_op_cmpl_double - .L_op_cmpl_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpg_double
- sub lr, lr, #(.L_ALT_op_cmpg_double - .L_op_cmpg_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmp_long
- sub lr, lr, #(.L_ALT_op_cmp_long - .L_op_cmp_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_eq
- sub lr, lr, #(.L_ALT_op_if_eq - .L_op_if_eq) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ne
- sub lr, lr, #(.L_ALT_op_if_ne - .L_op_if_ne) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_lt
- sub lr, lr, #(.L_ALT_op_if_lt - .L_op_if_lt) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ge
- sub lr, lr, #(.L_ALT_op_if_ge - .L_op_if_ge) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gt
- sub lr, lr, #(.L_ALT_op_if_gt - .L_op_if_gt) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_le
- sub lr, lr, #(.L_ALT_op_if_le - .L_op_if_le) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_eqz
- sub lr, lr, #(.L_ALT_op_if_eqz - .L_op_if_eqz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_nez
- sub lr, lr, #(.L_ALT_op_if_nez - .L_op_if_nez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ltz
- sub lr, lr, #(.L_ALT_op_if_ltz - .L_op_if_ltz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gez
- sub lr, lr, #(.L_ALT_op_if_gez - .L_op_if_gez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gtz
- sub lr, lr, #(.L_ALT_op_if_gtz - .L_op_if_gtz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_lez
- sub lr, lr, #(.L_ALT_op_if_lez - .L_op_if_lez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_3e
- sub lr, lr, #(.L_ALT_op_unused_3e - .L_op_unused_3e) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_3f
- sub lr, lr, #(.L_ALT_op_unused_3f - .L_op_unused_3f) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_40
- sub lr, lr, #(.L_ALT_op_unused_40 - .L_op_unused_40) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_41
- sub lr, lr, #(.L_ALT_op_unused_41 - .L_op_unused_41) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_42
- sub lr, lr, #(.L_ALT_op_unused_42 - .L_op_unused_42) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_43
- sub lr, lr, #(.L_ALT_op_unused_43 - .L_op_unused_43) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget
- sub lr, lr, #(.L_ALT_op_aget - .L_op_aget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_wide
- sub lr, lr, #(.L_ALT_op_aget_wide - .L_op_aget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_object
- sub lr, lr, #(.L_ALT_op_aget_object - .L_op_aget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_boolean
- sub lr, lr, #(.L_ALT_op_aget_boolean - .L_op_aget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_byte
- sub lr, lr, #(.L_ALT_op_aget_byte - .L_op_aget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_char
- sub lr, lr, #(.L_ALT_op_aget_char - .L_op_aget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_short
- sub lr, lr, #(.L_ALT_op_aget_short - .L_op_aget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput
- sub lr, lr, #(.L_ALT_op_aput - .L_op_aput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_wide
- sub lr, lr, #(.L_ALT_op_aput_wide - .L_op_aput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_object
- sub lr, lr, #(.L_ALT_op_aput_object - .L_op_aput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_boolean
- sub lr, lr, #(.L_ALT_op_aput_boolean - .L_op_aput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_byte
- sub lr, lr, #(.L_ALT_op_aput_byte - .L_op_aput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_char
- sub lr, lr, #(.L_ALT_op_aput_char - .L_op_aput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_short
- sub lr, lr, #(.L_ALT_op_aput_short - .L_op_aput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget
- sub lr, lr, #(.L_ALT_op_iget - .L_op_iget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_wide
- sub lr, lr, #(.L_ALT_op_iget_wide - .L_op_iget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_object
- sub lr, lr, #(.L_ALT_op_iget_object - .L_op_iget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_boolean
- sub lr, lr, #(.L_ALT_op_iget_boolean - .L_op_iget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_byte
- sub lr, lr, #(.L_ALT_op_iget_byte - .L_op_iget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_char
- sub lr, lr, #(.L_ALT_op_iget_char - .L_op_iget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_short
- sub lr, lr, #(.L_ALT_op_iget_short - .L_op_iget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput
- sub lr, lr, #(.L_ALT_op_iput - .L_op_iput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_wide
- sub lr, lr, #(.L_ALT_op_iput_wide - .L_op_iput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_object
- sub lr, lr, #(.L_ALT_op_iput_object - .L_op_iput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_boolean
- sub lr, lr, #(.L_ALT_op_iput_boolean - .L_op_iput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_byte
- sub lr, lr, #(.L_ALT_op_iput_byte - .L_op_iput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_char
- sub lr, lr, #(.L_ALT_op_iput_char - .L_op_iput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_short
- sub lr, lr, #(.L_ALT_op_iput_short - .L_op_iput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget
- sub lr, lr, #(.L_ALT_op_sget - .L_op_sget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_wide
- sub lr, lr, #(.L_ALT_op_sget_wide - .L_op_sget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_object
- sub lr, lr, #(.L_ALT_op_sget_object - .L_op_sget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_boolean
- sub lr, lr, #(.L_ALT_op_sget_boolean - .L_op_sget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_byte
- sub lr, lr, #(.L_ALT_op_sget_byte - .L_op_sget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_char
- sub lr, lr, #(.L_ALT_op_sget_char - .L_op_sget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_short
- sub lr, lr, #(.L_ALT_op_sget_short - .L_op_sget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput
- sub lr, lr, #(.L_ALT_op_sput - .L_op_sput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_wide
- sub lr, lr, #(.L_ALT_op_sput_wide - .L_op_sput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_object
- sub lr, lr, #(.L_ALT_op_sput_object - .L_op_sput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_boolean
- sub lr, lr, #(.L_ALT_op_sput_boolean - .L_op_sput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_byte
- sub lr, lr, #(.L_ALT_op_sput_byte - .L_op_sput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_char
- sub lr, lr, #(.L_ALT_op_sput_char - .L_op_sput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_short
- sub lr, lr, #(.L_ALT_op_sput_short - .L_op_sput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual
- sub lr, lr, #(.L_ALT_op_invoke_virtual - .L_op_invoke_virtual) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_super
- sub lr, lr, #(.L_ALT_op_invoke_super - .L_op_invoke_super) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_direct
- sub lr, lr, #(.L_ALT_op_invoke_direct - .L_op_invoke_direct) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_static
- sub lr, lr, #(.L_ALT_op_invoke_static - .L_op_invoke_static) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_interface
- sub lr, lr, #(.L_ALT_op_invoke_interface - .L_op_invoke_interface) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_void_no_barrier
- sub lr, lr, #(.L_ALT_op_return_void_no_barrier - .L_op_return_void_no_barrier) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_range
- sub lr, lr, #(.L_ALT_op_invoke_virtual_range - .L_op_invoke_virtual_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_super_range
- sub lr, lr, #(.L_ALT_op_invoke_super_range - .L_op_invoke_super_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_direct_range
- sub lr, lr, #(.L_ALT_op_invoke_direct_range - .L_op_invoke_direct_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_static_range
- sub lr, lr, #(.L_ALT_op_invoke_static_range - .L_op_invoke_static_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_interface_range
- sub lr, lr, #(.L_ALT_op_invoke_interface_range - .L_op_invoke_interface_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_79
- sub lr, lr, #(.L_ALT_op_unused_79 - .L_op_unused_79) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_7a
- sub lr, lr, #(.L_ALT_op_unused_7a - .L_op_unused_7a) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_int
- sub lr, lr, #(.L_ALT_op_neg_int - .L_op_neg_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_not_int
- sub lr, lr, #(.L_ALT_op_not_int - .L_op_not_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_long
- sub lr, lr, #(.L_ALT_op_neg_long - .L_op_neg_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_not_long
- sub lr, lr, #(.L_ALT_op_not_long - .L_op_not_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_float
- sub lr, lr, #(.L_ALT_op_neg_float - .L_op_neg_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_double
- sub lr, lr, #(.L_ALT_op_neg_double - .L_op_neg_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_long
- sub lr, lr, #(.L_ALT_op_int_to_long - .L_op_int_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_float
- sub lr, lr, #(.L_ALT_op_int_to_float - .L_op_int_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_double
- sub lr, lr, #(.L_ALT_op_int_to_double - .L_op_int_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_int
- sub lr, lr, #(.L_ALT_op_long_to_int - .L_op_long_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_float
- sub lr, lr, #(.L_ALT_op_long_to_float - .L_op_long_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_double
- sub lr, lr, #(.L_ALT_op_long_to_double - .L_op_long_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_int
- sub lr, lr, #(.L_ALT_op_float_to_int - .L_op_float_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_long
- sub lr, lr, #(.L_ALT_op_float_to_long - .L_op_float_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_double
- sub lr, lr, #(.L_ALT_op_float_to_double - .L_op_float_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_int
- sub lr, lr, #(.L_ALT_op_double_to_int - .L_op_double_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_long
- sub lr, lr, #(.L_ALT_op_double_to_long - .L_op_double_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_float
- sub lr, lr, #(.L_ALT_op_double_to_float - .L_op_double_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_byte
- sub lr, lr, #(.L_ALT_op_int_to_byte - .L_op_int_to_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_char
- sub lr, lr, #(.L_ALT_op_int_to_char - .L_op_int_to_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_short
- sub lr, lr, #(.L_ALT_op_int_to_short - .L_op_int_to_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int
- sub lr, lr, #(.L_ALT_op_add_int - .L_op_add_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_int
- sub lr, lr, #(.L_ALT_op_sub_int - .L_op_sub_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int
- sub lr, lr, #(.L_ALT_op_mul_int - .L_op_mul_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int
- sub lr, lr, #(.L_ALT_op_div_int - .L_op_div_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int
- sub lr, lr, #(.L_ALT_op_rem_int - .L_op_rem_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int
- sub lr, lr, #(.L_ALT_op_and_int - .L_op_and_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int
- sub lr, lr, #(.L_ALT_op_or_int - .L_op_or_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int
- sub lr, lr, #(.L_ALT_op_xor_int - .L_op_xor_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int
- sub lr, lr, #(.L_ALT_op_shl_int - .L_op_shl_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int
- sub lr, lr, #(.L_ALT_op_shr_int - .L_op_shr_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int
- sub lr, lr, #(.L_ALT_op_ushr_int - .L_op_ushr_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_long
- sub lr, lr, #(.L_ALT_op_add_long - .L_op_add_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_long
- sub lr, lr, #(.L_ALT_op_sub_long - .L_op_sub_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_long
- sub lr, lr, #(.L_ALT_op_mul_long - .L_op_mul_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_long
- sub lr, lr, #(.L_ALT_op_div_long - .L_op_div_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_long
- sub lr, lr, #(.L_ALT_op_rem_long - .L_op_rem_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_long
- sub lr, lr, #(.L_ALT_op_and_long - .L_op_and_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_long
- sub lr, lr, #(.L_ALT_op_or_long - .L_op_or_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_long
- sub lr, lr, #(.L_ALT_op_xor_long - .L_op_xor_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_long
- sub lr, lr, #(.L_ALT_op_shl_long - .L_op_shl_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_long
- sub lr, lr, #(.L_ALT_op_shr_long - .L_op_shr_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_long
- sub lr, lr, #(.L_ALT_op_ushr_long - .L_op_ushr_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_float
- sub lr, lr, #(.L_ALT_op_add_float - .L_op_add_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_float
- sub lr, lr, #(.L_ALT_op_sub_float - .L_op_sub_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_float
- sub lr, lr, #(.L_ALT_op_mul_float - .L_op_mul_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_float
- sub lr, lr, #(.L_ALT_op_div_float - .L_op_div_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_float
- sub lr, lr, #(.L_ALT_op_rem_float - .L_op_rem_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_double
- sub lr, lr, #(.L_ALT_op_add_double - .L_op_add_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_double
- sub lr, lr, #(.L_ALT_op_sub_double - .L_op_sub_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_double
- sub lr, lr, #(.L_ALT_op_mul_double - .L_op_mul_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_double
- sub lr, lr, #(.L_ALT_op_div_double - .L_op_div_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_double
- sub lr, lr, #(.L_ALT_op_rem_double - .L_op_rem_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_2addr
- sub lr, lr, #(.L_ALT_op_add_int_2addr - .L_op_add_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_int_2addr
- sub lr, lr, #(.L_ALT_op_sub_int_2addr - .L_op_sub_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_2addr
- sub lr, lr, #(.L_ALT_op_mul_int_2addr - .L_op_mul_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_2addr
- sub lr, lr, #(.L_ALT_op_div_int_2addr - .L_op_div_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_2addr
- sub lr, lr, #(.L_ALT_op_rem_int_2addr - .L_op_rem_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_2addr
- sub lr, lr, #(.L_ALT_op_and_int_2addr - .L_op_and_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_2addr
- sub lr, lr, #(.L_ALT_op_or_int_2addr - .L_op_or_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_2addr
- sub lr, lr, #(.L_ALT_op_xor_int_2addr - .L_op_xor_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int_2addr
- sub lr, lr, #(.L_ALT_op_shl_int_2addr - .L_op_shl_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int_2addr
- sub lr, lr, #(.L_ALT_op_shr_int_2addr - .L_op_shr_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int_2addr
- sub lr, lr, #(.L_ALT_op_ushr_int_2addr - .L_op_ushr_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_long_2addr
- sub lr, lr, #(.L_ALT_op_add_long_2addr - .L_op_add_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_long_2addr
- sub lr, lr, #(.L_ALT_op_sub_long_2addr - .L_op_sub_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_long_2addr
- sub lr, lr, #(.L_ALT_op_mul_long_2addr - .L_op_mul_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_long_2addr
- sub lr, lr, #(.L_ALT_op_div_long_2addr - .L_op_div_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_long_2addr
- sub lr, lr, #(.L_ALT_op_rem_long_2addr - .L_op_rem_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_long_2addr
- sub lr, lr, #(.L_ALT_op_and_long_2addr - .L_op_and_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_long_2addr
- sub lr, lr, #(.L_ALT_op_or_long_2addr - .L_op_or_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_long_2addr
- sub lr, lr, #(.L_ALT_op_xor_long_2addr - .L_op_xor_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_long_2addr
- sub lr, lr, #(.L_ALT_op_shl_long_2addr - .L_op_shl_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_long_2addr
- sub lr, lr, #(.L_ALT_op_shr_long_2addr - .L_op_shr_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_long_2addr
- sub lr, lr, #(.L_ALT_op_ushr_long_2addr - .L_op_ushr_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_float_2addr
- sub lr, lr, #(.L_ALT_op_add_float_2addr - .L_op_add_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_float_2addr
- sub lr, lr, #(.L_ALT_op_sub_float_2addr - .L_op_sub_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_float_2addr
- sub lr, lr, #(.L_ALT_op_mul_float_2addr - .L_op_mul_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_float_2addr
- sub lr, lr, #(.L_ALT_op_div_float_2addr - .L_op_div_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_float_2addr
- sub lr, lr, #(.L_ALT_op_rem_float_2addr - .L_op_rem_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_double_2addr
- sub lr, lr, #(.L_ALT_op_add_double_2addr - .L_op_add_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_double_2addr
- sub lr, lr, #(.L_ALT_op_sub_double_2addr - .L_op_sub_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_double_2addr
- sub lr, lr, #(.L_ALT_op_mul_double_2addr - .L_op_mul_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_double_2addr
- sub lr, lr, #(.L_ALT_op_div_double_2addr - .L_op_div_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_double_2addr
- sub lr, lr, #(.L_ALT_op_rem_double_2addr - .L_op_rem_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_lit16
- sub lr, lr, #(.L_ALT_op_add_int_lit16 - .L_op_add_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rsub_int
- sub lr, lr, #(.L_ALT_op_rsub_int - .L_op_rsub_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_lit16
- sub lr, lr, #(.L_ALT_op_mul_int_lit16 - .L_op_mul_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_lit16
- sub lr, lr, #(.L_ALT_op_div_int_lit16 - .L_op_div_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_lit16
- sub lr, lr, #(.L_ALT_op_rem_int_lit16 - .L_op_rem_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_lit16
- sub lr, lr, #(.L_ALT_op_and_int_lit16 - .L_op_and_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_lit16
- sub lr, lr, #(.L_ALT_op_or_int_lit16 - .L_op_or_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_lit16
- sub lr, lr, #(.L_ALT_op_xor_int_lit16 - .L_op_xor_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_lit8
- sub lr, lr, #(.L_ALT_op_add_int_lit8 - .L_op_add_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rsub_int_lit8
- sub lr, lr, #(.L_ALT_op_rsub_int_lit8 - .L_op_rsub_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_lit8
- sub lr, lr, #(.L_ALT_op_mul_int_lit8 - .L_op_mul_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_lit8
- sub lr, lr, #(.L_ALT_op_div_int_lit8 - .L_op_div_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_lit8
- sub lr, lr, #(.L_ALT_op_rem_int_lit8 - .L_op_rem_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_lit8
- sub lr, lr, #(.L_ALT_op_and_int_lit8 - .L_op_and_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_lit8
- sub lr, lr, #(.L_ALT_op_or_int_lit8 - .L_op_or_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_lit8
- sub lr, lr, #(.L_ALT_op_xor_int_lit8 - .L_op_xor_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int_lit8
- sub lr, lr, #(.L_ALT_op_shl_int_lit8 - .L_op_shl_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int_lit8
- sub lr, lr, #(.L_ALT_op_shr_int_lit8 - .L_op_shr_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int_lit8
- sub lr, lr, #(.L_ALT_op_ushr_int_lit8 - .L_op_ushr_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_quick
- sub lr, lr, #(.L_ALT_op_iget_quick - .L_op_iget_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_wide_quick
- sub lr, lr, #(.L_ALT_op_iget_wide_quick - .L_op_iget_wide_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_object_quick
- sub lr, lr, #(.L_ALT_op_iget_object_quick - .L_op_iget_object_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_quick
- sub lr, lr, #(.L_ALT_op_iput_quick - .L_op_iput_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_wide_quick
- sub lr, lr, #(.L_ALT_op_iput_wide_quick - .L_op_iput_wide_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_object_quick
- sub lr, lr, #(.L_ALT_op_iput_object_quick - .L_op_iput_object_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_quick
- sub lr, lr, #(.L_ALT_op_invoke_virtual_quick - .L_op_invoke_virtual_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_range_quick
- sub lr, lr, #(.L_ALT_op_invoke_virtual_range_quick - .L_op_invoke_virtual_range_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_boolean_quick
- sub lr, lr, #(.L_ALT_op_iput_boolean_quick - .L_op_iput_boolean_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_byte_quick
- sub lr, lr, #(.L_ALT_op_iput_byte_quick - .L_op_iput_byte_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_char_quick
- sub lr, lr, #(.L_ALT_op_iput_char_quick - .L_op_iput_char_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_short_quick
- sub lr, lr, #(.L_ALT_op_iput_short_quick - .L_op_iput_short_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_boolean_quick
- sub lr, lr, #(.L_ALT_op_iget_boolean_quick - .L_op_iget_boolean_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_byte_quick
- sub lr, lr, #(.L_ALT_op_iget_byte_quick - .L_op_iget_byte_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_char_quick
- sub lr, lr, #(.L_ALT_op_iget_char_quick - .L_op_iget_char_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_short_quick
- sub lr, lr, #(.L_ALT_op_iget_short_quick - .L_op_iget_short_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f3
- sub lr, lr, #(.L_ALT_op_unused_f3 - .L_op_unused_f3) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f4
- sub lr, lr, #(.L_ALT_op_unused_f4 - .L_op_unused_f4) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f5
- sub lr, lr, #(.L_ALT_op_unused_f5 - .L_op_unused_f5) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f6
- sub lr, lr, #(.L_ALT_op_unused_f6 - .L_op_unused_f6) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f7
- sub lr, lr, #(.L_ALT_op_unused_f7 - .L_op_unused_f7) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f8
- sub lr, lr, #(.L_ALT_op_unused_f8 - .L_op_unused_f8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f9
- sub lr, lr, #(.L_ALT_op_unused_f9 - .L_op_unused_f9) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_polymorphic
- sub lr, lr, #(.L_ALT_op_invoke_polymorphic - .L_op_invoke_polymorphic) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_polymorphic_range
- sub lr, lr, #(.L_ALT_op_invoke_polymorphic_range - .L_op_invoke_polymorphic_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_custom
- sub lr, lr, #(.L_ALT_op_invoke_custom - .L_op_invoke_custom) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_custom_range
- sub lr, lr, #(.L_ALT_op_invoke_custom_range - .L_op_invoke_custom_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_method_handle
- sub lr, lr, #(.L_ALT_op_const_method_handle - .L_op_const_method_handle) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_method_type
- sub lr, lr, #(.L_ALT_op_const_method_type - .L_op_const_method_type) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
- .balign 128
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
- cmp r0, #0 @ Exception pending?
- beq MterpFallback @ If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException @ (self, shadow_frame)
- cmp r0, #0
- beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr r1, [rFP, #OFF_FP_DEX_PC]
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp rINST, #0
-MterpCommonTakenBranch:
- bgt .L_forward_branch @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmp rPROFILE, #JIT_CHECK_OSR
- beq .L_osr_check
- subsgt rPROFILE, #1
- beq .L_add_batch @ counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- REFRESH_IBASE
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bne .L_suspend_request_pending
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov r0, rSELF
- bl MterpSuspendCheck @ (self)
- cmp r0, #0
- bne MterpFallback
- REFRESH_IBASE @ might have changed during suspend
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_no_count_backwards:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- bne .L_resume_backward_branch
-.L_osr_check:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_osr_forward
-.L_resume_forward_branch:
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_check_osr_forward:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add r1, rFP, #OFF_FP_SHADOWFRAME
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr r0, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- bl MterpLogOSR
-#endif
- mov r0, #1 @ Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov r0, #0 @ signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov r0, #1 @ signal return to caller.
- b MterpDone
-MterpReturn:
- ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
- str r0, [r2]
- str r1, [r2, #4]
- mov r0, #1 @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp rPROFILE, #0
- bgt MterpProfileActive @ if > 0, we may have some counts to report.
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
-MterpProfileActive:
- mov rINST, r0 @ stash return value
- /* Report cached hotness counts */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov r0, rINST @ restore return value
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
- END ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
deleted file mode 100644
index 24a2252..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ /dev/null
@@ -1,10644 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via xFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
- r0 : w0 is 32-bit return register and x0 is 64-bit.
- r0-r7 : Argument registers.
- r8-r15 : Caller save registers (used as temporary registers).
- r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
- the linker, by the trampolines and other stubs (the backend uses
- these as temporary registers).
- r18 : Caller save register (used as temporary register).
- r19 : Pointer to thread-local storage.
- r20-r29: Callee save registers.
- r30 : (lr) is reserved (the link register).
- rsp : (sp) is reserved (the stack pointer).
- rzr : (zr) is reserved (the zero register).
-
- Floating-point registers
- v0-v31
-
- v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
- This is analogous to the C/C++ (hard-float) calling convention.
- v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
- Also used as temporary and codegen scratch registers.
-
- v0-v7 and v16-v31 : trashed across C calls.
- v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
- v16-v31: Used as codegen temp/scratch.
- v8-v15 : Can be used for promotion.
-
- Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- x20 xPC interpreted program counter, used for fetching instructions
- x21 xFP interpreted frame pointer, used for accessing locals and args
- x22 xSELF self (Thread) pointer
- x23 xINST first 16-bit code unit of current instruction
- x24 xIBASE interpreted instruction base pointer, used for computed goto
- x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- x26 wPROFILE jit profile hotness countdown
- x16 ip scratch reg
- x17 ip2 scratch reg (used by macros)
-
-Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC x20
-#define CFI_DEX 20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define xFP x21
-#define xSELF x22
-#define xINST x23
-#define wINST w23
-#define xIBASE x24
-#define xREFS x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip x16
-#define ip2 x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-/*
- * Fetch the next instruction from xPC into wINST. Does not advance xPC.
- */
-.macro FETCH_INST
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances xPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
- * xINST ahead of possible exception point. Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
- add xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- add xPC, xPC, \reg, sxtw
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [xPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
- add \reg, xIBASE, \reg, lsl #7
- br \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add \reg, \base, \reg, lsl #7
- br \reg
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [xFP, \vreg, uxtw #2]
- str wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [xFP, \vreg, uxtw #2]
- str \reg, [xREFS, \vreg, uxtw #2]
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
- */
-.macro GET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- ldr \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- str \reg, [ip2]
- add ip2, xREFS, \vreg, lsl #2
- str xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
- ldrsw \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
- stp \reg1, \reg2, [sp, #(\offset)]
- .cfi_rel_offset \reg1, (\offset)
- .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
- ldp \reg1, \reg2, [sp, #(\offset)]
- .cfi_restore \reg1
- .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
- stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
- .cfi_adjust_cfa_offset (\frame_adjustment)
- .cfi_rel_offset \reg1, 0
- .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
- ldp \reg1, \reg2, [sp], #(\frame_adjustment)
- .cfi_restore \reg1
- .cfi_restore \reg2
- .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- .text
-
-/*
- * Interpreter entry point.
- * On entry:
- * x0 Thread* self/
- * x1 insns_
- * x2 ShadowFrame
- * x3 JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
- SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
- SAVE_TWO_REGS xIBASE, xREFS, 16
- SAVE_TWO_REGS xSELF, xINST, 32
- SAVE_TWO_REGS xPC, xFP, 48
- SAVE_TWO_REGS fp, lr, 64
- add fp, sp, #64
-
- /* Remember the return register */
- str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov xSELF, x0
- ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
- add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
- ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- bl MterpSetUpHotnessCountdown
- mov wPROFILE, w0 // Starting hotness countdown to xPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST // load wINST from rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
- /* NOTE: no fallthrough */
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
- GET_INST_OPCODE ip // ip<- opcode from rINST
- GOTO_OPCODE ip // execute it
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lsr w3, wINST, #12 // w3<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 1 // w3<- BBBB
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 2 // w3<- BBBB
- FETCH w2, 1 // w2<- AAAA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- SET_VREG_WIDE x3, w2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 0
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* for: move-result-wide */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr x0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, x2 // fp[AA]<- r0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 1
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- lsr w2, wINST, #8 // w2<- AA
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- mov x1, #0 // w1<- 0
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
- GET_INST_OPCODE ip // extract opcode from rINST
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_void_check
-.Lop_return_void_return:
- mov x0, #0
- b MterpReturn
-.Lop_return_void_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_void_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_check
-.Lop_return_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.Lop_return_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_wide_check
-.Lop_return_wide_return:
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x0, w2 // x0<- vAA
- b MterpReturn
-.Lop_return_wide_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_wide_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_object_check
-.Lop_return_object_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.Lop_return_object_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_object_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, #+B */
- sbfx w1, wINST, #12, #4 // w1<- sssssssB
- ubfx w0, wINST, #8, #4 // w0<- A
- FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- GET_INST_OPCODE ip // ip<- opcode from xINST
- SET_VREG w1, w0 // fp[A]<- w1
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance xPC, load wINST
- SET_VREG w0, w3 // vAA<- w0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, #+BBBBbbbb */
- lsr w3, wINST, #8 // w3<- AA
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w1, 2 // w1<- BBBB (high
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w3 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
- lsr w3, wINST, #8 // r3<- AA
- lsl w0, w0, #16 // r0<- BBBB0000
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- SET_VREG w0, w3 // vAA<- r0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S x0, 1 // x0<- ssssssssssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH w0, 1 // x0<- 000000000000bbbb (low)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_S x2, 2 // x2<- ssssssssssssBBBB (high)
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr x0, x0, x2, lsl #16 // x0<- ssssssssBBBBbbbb
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH w0, 1 // w0<- bbbb (low)
- FETCH w1, 2 // w1<- BBBB (low middle)
- FETCH w2, 3 // w2<- hhhh (high middle)
- FETCH w3, 4 // w3<- HHHH (high)
- lsr w4, wINST, #8 // r4<- AA
- FETCH_ADVANCE_INST 5 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- orr x0, x0, x2, lsl #32 // w0<- hhhhBBBBbbbb
- orr x0, x0, x3, lsl #48 // w0<- HHHHhhhhBBBBbbbb
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- lsl x0, x0, #48
- SET_VREG_WIDE x0, w1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, String//BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w2, 2 // w2<- BBBB (high
- lsr w1, wINST, #8 // w1<- AA
- orr w0, w0, w2, lsl #16 // w1<- BBBBbbbb
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 // advance rPC
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 3 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w1<- self
- bl artLockObjectFromCode
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w0<- self
- bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpCheckCast // (index, &obj, method, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- EXPORT_PC
- FETCH w0, 1 // w0<- CCCC
- lsr w1, wINST, #12 // w1<- B
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpInstanceOf // (index, &obj, method, self)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x1, MterpException
- ADVANCE 2 // advance rPC
- SET_VREG w0, w2 // vA<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
- /*
- * Return the length of an array.
- */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w0, w1 // w0<- vB (object ref)
- cbz w0, common_errNullObject // yup, fail
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w3, w2 // vB<- length
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xSELF
- mov w2, wINST
- bl MterpNewInstance // (shadow_frame, self, inst_data)
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl MterpFilledNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl MterpFilledNewArrayRange
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x1, x0, x1, lsl #16 // x1<- ssssssssBBBBbbbb
- GET_VREG w0, w3 // w0<- vAA (array object)
- add x1, xPC, x1, lsl #1 // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
- bl MterpFillArrayData // (obj, payload)
- cbz w0, MterpPossibleException // exception?
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w1, w2 // r1<- vAA (exception object)
- cbz w1, common_errNullObject
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx wINST, wINST, #8, #8 // wINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S wINST, 1 // wINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH w0, 1 // w0<- aaaa (lo)
- FETCH w1, 2 // w1<- AAAA (hi)
- orr wINST, w0, w1, lsl #16 // wINST<- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl MterpDoPackedSwitch // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl MterpDoSparseSwitch // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG s1, w2
- GET_VREG s2, w3
- fcmp s1, s2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG s1, w2
- GET_VREG s2, w3
- fcmp s1, s2
- cset w0, ne
- cneg w0, w0, cc
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE d1, w2
- GET_VREG_WIDE d2, w3
- fcmp d1, d2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE d1, w2
- GET_VREG_WIDE d2, w3
- fcmp d1, d2
- cset w0, ne
- cneg w0, w0, cc
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE x1, w2
- GET_VREG_WIDE x2, w3
- cmp x1, x2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG w0, w4
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.eq MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.ne MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.lt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.ge MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.gt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.le MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- cbz w2, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- cbnz w2, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- tbnz w2, #31, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- tbz w2, #31, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 1
- cmp w2, #0 // compare (vA, 0)
- .endif
- b.gt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if 1
- cmp w2, #0 // compare (vA, 0)
- .endif
- b.le MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #2 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldr w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // yes, bail
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x2, w4
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- FETCH_B w3, 1, 1 // w3<- CC
- EXPORT_PC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- bl artAGetObjectFromMterp // (array, index)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w9<- AA
- PREFETCH_INST 2
- cbnz w1, MterpException
- SET_VREG_OBJECT w0, w2
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrsb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrsh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #2 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- str w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- GET_VREG_WIDE x1, w4
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpAputObject
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU32
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetU32
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU64
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetU64
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetObj
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetObj
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetU8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetI8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetU16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIGetI16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU32
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutU32
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU64
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutU64
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutObj
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutObj
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutU8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutI8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutU16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpIPutI16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU32
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetU32
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU64
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetU64
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetObj
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetObj
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetU8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetI8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetU16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSGetI16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU32
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutU32
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU64
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutU64
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutObj
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutObj
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutU8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI8
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutI8
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutU16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI16
- mov x0, xPC // arg0: Instruction* inst
- mov x1, xINST // arg1: uint16_t inst_data
- add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
- mov x3, xSELF // arg3: Thread* self
- PREFETCH_INST 2 // prefetch next opcode
- bl MterpSPutI16
- cbz x0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtual
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeSuper
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeDirect
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeStatic
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeInterface
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_void_no_barrier_check
-.Lop_return_void_no_barrier_return:
- mov x0, #0
- b MterpReturn
-.Lop_return_void_no_barrier_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_void_no_barrier_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeSuperRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeDirectRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeStaticRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeInterfaceRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sub w0, wzr, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- mvn w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- sub x0, xzr, x0
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- mvn x0, x0
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- eor w0, w0, #0x80000000 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- eor x0, x0, #0x8000000000000000
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /* int-to-long vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // fp[A]<- x0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op w0".
- *
- * For: int-to-float, float-to-int
- * TODO: refactor all of the conversions - parameterize width and use same template.
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG w0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf s0, w0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op w0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG w0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf d0, w0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op x0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf s0, x0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
- /*
- * Generic 64bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op x0".
- *
- * For: long-to-double, double-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf d0, x0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "w0 = op s0".
- *
- * For: int-to-float, float-to-int
- * TODO: refactor all of the conversions - parameterize width and use same template.
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs w0, s0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "x0 = op s0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs x0, s0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvt d0, s0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "w0 = op d0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs w0, d0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
- /*
- * Generic 64bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "x0 = op d0".
- *
- * For: long-to-double, double-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs x0, d0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvt s0, d0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sxtb w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- uxth w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sxth w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 1
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 1
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w2, w0, w1 // optional op; may set condition codes
- msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- add x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sub x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- mul x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 1
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sdiv x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 1
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv x3, x1, x2
- msub x0, x3, x2, x1 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- and x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- orr x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // w2<- vCC
- GET_VREG_WIDE x1, w1 // w1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- eor x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- lsl x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- asr x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- lsr x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fadd s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fsub s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fmul s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fdiv s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* EABI doesn't define a float remainder function, but libm does */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- bl fmodf // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // w2<- vCC
- GET_VREG_WIDE d1, w1 // w1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fadd d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // w2<- vCC
- GET_VREG_WIDE d1, w1 // w1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fsub d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // w2<- vCC
- GET_VREG_WIDE d1, w1 // w1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fmul d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // w2<- vCC
- GET_VREG_WIDE d1, w1 // w1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fdiv d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
- /* rem vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d1, w2 // d1<- vCC
- GET_VREG_WIDE d0, w1 // d0<- vBB
- bl fmod
- lsr w4, wINST, #8 // w4<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sdiv w2, w0, w1 // optional op; may set condition codes
- msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- add x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- sub x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- mul x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 1
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- sdiv x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 1
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sdiv x3, x0, x1
- msub x0, x3, x1, x0 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- and x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- orr x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- eor x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- lsl x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- asr x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- lsr x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fadd s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fsub s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fmul s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fdiv s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
- /* rem vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- bl fmodf
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w9
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // x1<- vB
- GET_VREG_WIDE d0, w2 // x0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fadd d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // x1<- vB
- GET_VREG_WIDE d0, w2 // x0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fsub d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // x1<- vB
- GET_VREG_WIDE d0, w2 // x0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fmul d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // x1<- vB
- GET_VREG_WIDE d0, w2 // x0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fdiv d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
- /* rem vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- bl fmod
- ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sub w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w3, w0, w1
- msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w3, w0, w1 // optional op; may set condition codes
- msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldr w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* iget-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w4, 1 // w4<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldr x0, [x3, x4] // x0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG_WIDE x0, w2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- EXPORT_PC
- GET_VREG w0, w2 // w0<- object we're operating on
- bl artIGetObjectFromMterp // (obj, offset)
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz w3, MterpPossibleException // bail out
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- str w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w3, 1 // w3<- field byte offset
- GET_VREG w2, w2 // w2<- fp[B], the object pointer
- ubfx w0, wINST, #8, #4 // w0<- A
- cbz w2, common_errNullObject // object was null
- GET_VREG_WIDE x0, w0 // x0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- str x0, [x2, x3] // obj.field<- x0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpIputObjectQuick
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualQuick
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualQuickRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strb w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strb w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strh w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strh w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrb w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrsb w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrh w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrsh w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokePolymorphic
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokePolymorphicRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeCustom
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeCustomRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstMethodHandle // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstMethodType // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
- .balign 128
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
- cbz x0, MterpFallback // If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException // (self, shadow_frame)
- cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr w1, [xFP, #OFF_FP_DEX_PC]
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * wINST <= signed offset
- * wPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp wINST, #0
- b.gt .L_forward_branch // don't add forward branches to hotness
- tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
- subs wPROFILE, wPROFILE, #1 // countdown
- b.eq .L_add_batch // counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- REFRESH_IBASE
- ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L_suspend_request_pending
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback
- REFRESH_IBASE // might have changed during suspend
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_no_count_backwards:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.ne .L_resume_backward_branch
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_osr_forward
-.L_resume_forward_branch:
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_check_osr_forward:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add x1, xFP, #OFF_FP_SHADOWFRAME
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr x0, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne check1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-check1:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- sxtw x2, wINST
- bl MterpLogOSR
-#endif
- mov x0, #1 // Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov x0, #0 // signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* xFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov x0, #1 // signal return to caller.
- b MterpDone
-MterpReturn:
- ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
- str x0, [x2]
- mov x0, #1 // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp wPROFILE, #0
- bgt MterpProfileActive // if > 0, we may have some counts to report.
- .cfi_remember_state
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
- .cfi_restore_state // Reset unwind info so following code unwinds.
- .cfi_def_cfa_offset 80 // workaround for clang bug: 31975598
-
-MterpProfileActive:
- mov xINST, x0 // stash return value
- /* Report cached hotness counts */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov x0, xINST // restore return value
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (0 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (1 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (2 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (3 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (4 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (5 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (6 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (7 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (8 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (9 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (10 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (11 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (12 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (13 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (14 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (15 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (16 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (17 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (18 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (19 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (20 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (21 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (22 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (23 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (24 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (25 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (26 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (27 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (28 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (29 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (30 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (31 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (32 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (33 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (34 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (35 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (36 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (37 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (38 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (39 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (40 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (41 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (42 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (43 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (44 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (45 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (46 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (47 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (48 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (49 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (50 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (51 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (52 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (53 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (54 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (55 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (56 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (57 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (58 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (59 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (60 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (61 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (62 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (63 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (64 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (65 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (66 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (67 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (68 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (69 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (70 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (71 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (72 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (73 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (74 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (75 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (76 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (77 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (78 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (79 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (80 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (81 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (82 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (83 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (84 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (85 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (86 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (87 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (88 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (89 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (90 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (91 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (92 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (93 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (94 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (95 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (96 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (97 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (98 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (99 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (100 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (101 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (102 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (103 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (104 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (105 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (106 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (107 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (108 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (109 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (110 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (111 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (112 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (113 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (114 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (115 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (116 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (117 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (118 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (119 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (120 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (121 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (122 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (123 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (124 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (125 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (126 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (127 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (128 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (129 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (130 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (131 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (132 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (133 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (134 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (135 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (136 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (137 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (138 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (139 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (140 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (141 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (142 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (143 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (144 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (145 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (146 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (147 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (148 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (149 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (150 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (151 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (152 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (153 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (154 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (155 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (156 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (157 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (158 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (159 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (160 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (161 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (162 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (163 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (164 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (165 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (166 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (167 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (168 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (169 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (170 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (171 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (172 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (173 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (174 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (175 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (176 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (177 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (178 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (179 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (180 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (181 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (182 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (183 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (184 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (185 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (186 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (187 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (188 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (189 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (190 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (191 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (192 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (193 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (194 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (195 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (196 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (197 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (198 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (199 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (200 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (201 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (202 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (203 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (204 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (205 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (206 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (207 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (208 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (209 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (210 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (211 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (212 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (213 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (214 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (215 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (216 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (217 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (218 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (219 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (220 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (221 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (222 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (223 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (224 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (225 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (226 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (227 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (228 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (229 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (230 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (231 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (232 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (233 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (234 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (235 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (236 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (237 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (238 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (239 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (240 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (241 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (242 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (243 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (244 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (245 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (246 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (247 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (248 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (249 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (250 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (251 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (252 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (253 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (254 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (255 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
- .balign 128
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-// Close out the cfi info. We're treating mterp as a single function.
-
-END ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
deleted file mode 100644
index ff2605d..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ /dev/null
@@ -1,11398 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2 /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64 /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6 /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rIBASE interpreted instruction base pointer, used for computed goto
- s4 rINST first 16-bit code unit of current instruction
- s5 rOBJ object pointer
- s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s7 rTEMP used as temp storage that can survive a function call
- s8 rPROFILE branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero $0 /* always zero */
-#define AT $at /* assembler temp */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define t0 $8 /* temp registers (not saved across subroutine calls) */
-#define t1 $9
-#define t2 $10
-#define t3 $11
-#define t4 $12
-#define t5 $13
-#define t6 $14
-#define t7 $15
-#define ta0 $12 /* alias */
-#define ta1 $13
-#define ta2 $14
-#define ta3 $15
-#define s0 $16 /* saved across subroutine calls (callee saved) */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* two more temp registers */
-#define t9 $25
-#define k0 $26 /* kernel temporary */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define s8 $30 /* one more callee saved */
-#define ra $31 /* return address */
-
-/* FP register definitions */
-#define fv0 $f0
-#define fv0f $f1
-#define fv1 $f2
-#define fv1f $f3
-#define fa0 $f12
-#define fa0f $f13
-#define fa1 $f14
-#define fa1f $f15
-#define ft0 $f4
-#define ft0f $f5
-#define ft1 $f6
-#define ft1f $f7
-#define ft2 $f8
-#define ft2f $f9
-#define ft3 $f10
-#define ft3f $f11
-#define ft4 $f16
-#define ft4f $f17
-#define ft5 $f18
-#define ft5f $f19
-#define fs0 $f20
-#define fs0f $f21
-#define fs1 $f22
-#define fs1f $f23
-#define fs2 $f24
-#define fs2f $f25
-#define fs3 $f26
-#define fs3f $f27
-#define fs4 $f28
-#define fs4f $f29
-#define fs5 $f30
-#define fs5f $f31
-
-#ifndef MIPS32REVGE6
-#define fcc0 $fcc0
-#define fcc1 $fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
- seb rd, rt
-#define SEH(rd, rt) \
- seh rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- ins rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
- sll rd, rt, 24; \
- sra rd, rd, 24
-#define SEH(rd, rt) \
- sll rd, rt, 16; \
- sra rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- sll rt_hi, rt_hi, 16; \
- or rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mthc1 r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mtc1 r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
- jic rt, 0
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- lsa rd, rs, rt, sa; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#else
-#define JR(rt) \
- jalr zero, rt
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- .set push; \
- .set noat; \
- sll AT, rs, sa; \
- addu rd, AT, rt; \
- .set pop; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
- subu tmp, rPC, tmp; \
- sra tmp, tmp, 1; \
- sw tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
- lhu rINST, ((_count)*2)(rPC); \
- addu rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd. Updates
- * rPC to point to the next instruction. "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
- addu rPC, rPC, rd; \
- lhu rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
- sll rd, rd, 7; \
- addu rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
- GET_OPCODE_TARGET(rd); \
- JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
- .set noat; \
- EAS2(AT, rFP, rix); \
- l.s rd, (AT); \
- .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- s.s rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- jalr zero, dst; \
- s.s rlo, 0(t8); \
- .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- lw rd, 0(AT); \
- .set at
-
-#define STORE_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- sw rd, 0(AT); \
- .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
- sw rlo, off(rbase); \
- sw rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
- lw rlo, off(rbase); \
- lw rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- .set noat; \
- mfhc1 AT, rlo; \
- sw AT, (off+4)(rbase); \
- .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- .set noat; \
- lw AT, (off+4)(rbase); \
- mthc1 AT, rlo; \
- .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- s.s rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- l.s rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP 84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
- STACK_STORE(ra, 124); \
- STACK_STORE(s8, 120); \
- STACK_STORE(s0, 116); \
- STACK_STORE(s1, 112); \
- STACK_STORE(s2, 108); \
- STACK_STORE(s3, 104); \
- STACK_STORE(s4, 100); \
- STACK_STORE(s5, 96); \
- STACK_STORE(s6, 92); \
- STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
- STACK_LOAD(s7, 88); \
- STACK_LOAD(s6, 92); \
- STACK_LOAD(s5, 96); \
- STACK_LOAD(s4, 100); \
- STACK_LOAD(s3, 104); \
- STACK_LOAD(s2, 108); \
- STACK_LOAD(s1, 112); \
- STACK_LOAD(s0, 116); \
- STACK_LOAD(s8, 120); \
- STACK_LOAD(ra, 124); \
- DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
-#define LONG_MIN_HIGH 0x80000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
- .global ExecuteMterpImpl
- .ent ExecuteMterpImpl
- .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
- .cfi_startproc
- .set noreorder
- .cpload t9
- .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
- STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
- .cprestore STACK_OFFSET_GP
-
- /* Remember the return register */
- sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
- EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
- lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
- EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
- EXPORT_PC()
-
- /* Starting ibase */
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
- /* Set up for backwards branches & osr profiling */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- JAL(MterpSetUpHotnessCountdown) # (method, shadow_frame, self)
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST() # load rINST from rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* NOTE: no fallthrough */
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH(a1, 1) # a1 <- BBBB
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH(a1, 2) # a1 <- BBBB
- FETCH(a0, 1) # a0 <- AAAA
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- GET_OPA4(a2) # a2 <- A(+)
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 1) # a3 <- BBBB
- GET_OPA(a2) # a2 <- AA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 2) # a3 <- BBBB
- FETCH(a2, 1) # a2 <- AAAA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH(a1, 1) # a1 <- BBBB
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH(a1, 2) # a1 <- BBBB
- FETCH(a0, 1) # a0 <- AAAA
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- GET_OPA(a2) # a2 <- AA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
- .else
- SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* move-result-wide vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- LOAD64(a0, a1, a3) # a0/a1 <- retval.j
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- GET_OPA(a2) # a2 <- AA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
- .else
- SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
- sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- GET_VREG(v0, a2) # v0 <- vAA
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
- LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- GET_VREG(v0, a2) # v0 <- vAA
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, +B */
- sll a1, rINST, 16 # a1 <- Bxxx0000
- GET_OPA(a0) # a0 <- A+
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
- and a0, a0, 15
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, +BBBBbbbb */
- GET_OPA(a3) # a3 <- AA
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, +BBBB0000 */
- FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- sll a0, a0, 16 # a0 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- sra a1, a0, 31 # a1 <- ssssssss
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, +BBBBbbbb */
- FETCH(a0, 1) # a0 <- 0000bbbb (low)
- GET_OPA(a3) # a3 <- AA
- FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- sra a1, a0, 31 # a1 <- ssssssss
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, +HHHHhhhhBBBBbbbb */
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (low middle)
- FETCH(a2, 3) # a2 <- hhhh (high middle)
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
- FETCH(a3, 4) # a3 <- HHHH (high)
- GET_OPA(t1) # t1 <- AA
- INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
- FETCH_ADVANCE_INST(5) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, +BBBB000000000000 */
- FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- li a0, 0 # a0 <- 00000000
- sll a1, 16 # a1 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, string@BBBBBBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a2, 2) # a2 <- BBBB (high)
- GET_OPA(a1) # a1 <- AA
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(3) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(3) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstClass) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
- PREFETCH_INST(2)
- bnez v0, MterpPossibleException
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- CCCC
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- GET_OPA4(rOBJ) # rOBJ <- A+
- JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
- /*
- * Return the length of an array.
- */
- /* array-length vA, vB */
- GET_OPB(a1) # a1 <- B
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a0, a1) # a0 <- vB (object ref)
- # is object null?
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a3, a2, t0) # vA <- length
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- JAL(MterpNewInstance)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpNewArray)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
- move a1, rPC
- move a2, rSELF
- JAL(MterpFilledNewArray) # v0 <- helper(shadow_frame, pc, self)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
- move a1, rPC
- move a2, rSELF
- JAL(MterpFilledNewArrayRange) # v0 <- helper(shadow_frame, pc, self)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC()
- FETCH(a1, 1) # a1 <- bbbb (lo)
- FETCH(a0, 2) # a0 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
- GET_VREG(a0, a3) # a0 <- vAA (array object)
- EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
- JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC() # exception handler can throw
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a1, a2) # a1 <- vAA (exception object)
- # null object?
- beqz a1, common_errNullObject # yes, throw an NPE instead
- sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sll a0, rINST, 16 # a0 <- AAxx0000
- sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- FETCH(rINST, 1) # rINST <- aaaa (lo)
- FETCH(a1, 2) # a1 <- AAAA (hi)
- INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_VREG(a1, a3) # a1 <- vAA
- EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
- JAL(MterpDoPackedSwitch) # a0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_VREG(a1, a3) # a1 <- vAA
- EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
- JAL(MterpDoSparseSwitch) # a0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * for: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8
- GET_VREG_F(ft0, a2)
- GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
- cmp.eq.s ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.s ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.s fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 0
- c.olt.s fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.s fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * for: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8
- GET_VREG_F(ft0, a2)
- GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
- cmp.eq.s ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.s ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.s fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 1
- c.olt.s fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.s fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # rOBJ <- BB
- srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
- EAS2(t0, rFP, t0) # t0 <- &fp[CC]
- LOAD64_F(ft0, ft0f, rOBJ)
- LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
- cmp.eq.d ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.d fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 0
- c.olt.d fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.d fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # rOBJ <- BB
- srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
- EAS2(t0, rFP, t0) # t0 <- &fp[CC]
- LOAD64_F(ft0, ft0f, rOBJ)
- LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
- cmp.eq.d ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.d fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 1
- c.olt.d fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.d fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
- /*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * I think I can improve on the ARM code by the following observation
- * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
- * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
- * subu v0, t0, t1 # v0= -1:1:0 for [ < > = ]
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(a3, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- slt t0, a1, a3 # compare hi
- sgt t1, a1, a3
- subu v0, t1, t0 # v0 <- (-1, 1, 0)
- bnez v0, .Lop_cmp_long_finish
- # at this point x.hi==y.hi
- sltu t0, a0, a2 # compare lo
- sgtu t1, a0, a2
- subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
-
-.Lop_cmp_long_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- beq a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bne a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- blt a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bge a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bgt a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- ble a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- beq a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bne a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- blt a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bge a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bgt a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- ble a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t0) # t0 <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
- # compare unsigned index, length
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # a2/a3 <- vBB[vCC]
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- *
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpAputObject)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtual)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeSuper)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeDirect)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeStatic)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeInterface)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeSuperRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeDirectRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeStaticRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeInterfaceRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- negu a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- not a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- negu v0, a0 # optional op
- negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- not a0, a0 # optional op
- not a1, a1 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- addu a0, a0, 0x80000000 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- addu a1, a1, 0x80000000 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result0/result1 = op a0".
- *
- * For: int-to-long
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- sra a1, a0, 31 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
- /*
- * Generic 32-bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.w fv0, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
- /*
- * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.w fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
- /*
- * long-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdisf)
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
- /*
- * long-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
- /*
- * float-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.s fcc0, fa0, fa0
- mtc1 zero, fa0
- movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
- trunc.w.s fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
- /*
- * float-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.s fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.s fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .Lop_float_to_long_get_opcode
-
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .Lop_float_to_long_get_opcode
-
- neg.s fa1, fa1
- c.ole.s fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .Lop_float_to_long_get_opcode
-
- JAL(__fixsfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .Lop_float_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
- /*
- * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.s fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
- /*
- * double-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.d fcc0, fa0, fa0
- mtc1 zero, fa0
- MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
- movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
- trunc.w.d fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
- /*
- * double-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.d fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.d fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .Lop_double_to_long_get_opcode
-
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .Lop_double_to_long_get_opcode
-
- neg.d fa1, fa1
- c.ole.d fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .Lop_double_to_long_get_opcode
-
- JAL(__fixdfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .Lop_double_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
- /*
- * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: double-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.d fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- SEB(a0, a0) # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- and a0, 0xffff # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- SEH(a0, a0) # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/*
- * The compiler generates the following sequence for
- * [v1 v0] = [a1 a0] + [a3 a2];
- * addu v0,a2,a0
- * addu a1,a3,a1
- * sltu v1,v0,a2
- * addu v1,v1,a1
- */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- addu v0, a2, a0 # optional op
- addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/*
- * For little endian the code sequence looks as follows:
- * subu v0,a0,a2
- * subu v1,a1,a3
- * sltu a0,a0,v0
- * subu v1,v1,a0
- */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- subu v0, a0, a2 # optional op
- subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
- /*
- * Signed 64-bit integer multiply.
- * a1 a0
- * x a3 a2
- * -------------
- * a2a1 a2a0
- * a3a0
- * a3a1 (<= unused)
- * ---------------
- * v1 v0
- */
- /* mul-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- and t0, a0, 255 # a2 <- BB
- srl t1, a0, 8 # a3 <- CC
- EAS2(t0, rFP, t0) # t0 <- &fp[BB]
- LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
-
- EAS2(t1, rFP, t1) # t0 <- &fp[CC]
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
-#endif
- mul t0, a2, a1 # t0= a2a1
- addu v1, v1, t1 # v1+= hi(a2a0)
- addu v1, v1, t0 # v1= a3a0 + a2a1;
-
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- b .Lop_mul_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- JAL(__divdi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- JAL(__moddi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- and a0, a0, a2 # optional op
- and a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- or a0, a0, a2 # optional op
- or a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- xor a0, a0, a2 # optional op
- xor a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t2) # t2 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v1, a2, 0x20 # shift< shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .Lop_shl_long_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t3) # t3 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_shr_long_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_ushr_long_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- add.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- sub.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- mul.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- JAL(fmodf) # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- add.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- sub.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- mul.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- JAL(fmod)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/*
- * See op_add_long.S for details
- */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- addu v0, a2, a0 # optional op
- addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/*
- * See op_sub_long.S for more details
- */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- subu v0, a0, a2 # optional op
- subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
- /*
- * See op_mul_long.S for more details
- */
- /* mul-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
-
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # vAA.low / high
-
- GET_OPB(t1) # t1 <- B
- EAS2(t1, rFP, t1) # t1 <- &fp[B]
- LOAD64(a2, a3, t1) # vBB.low / high
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
- #endif
- mul t2, a2, a1 # t2= a2a1
- addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
- addu v1, v1, t2 # v1= v1 + a2a1;
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- JAL(__divdi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- JAL(__moddi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- and a0, a0, a2 # optional op
- and a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- or a0, a0, a2 # optional op
- or a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- xor a0, a0, a2 # optional op
- xor a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
- LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v1, a2, 0x20 # shift< shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .Lop_shl_long_2addr_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- GET_OPA4(t2) # t2 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t2) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_shr_long_2addr_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- GET_OPA4(t3) # t3 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t3) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_ushr_long_2addr_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- add.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- sub.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- mul.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- JAL(fmodf)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- add.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sub.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- mul.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- div.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(fmod)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-#ifdef MIPS32REVGE6
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lw a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* iget-wide-quick vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1 # t0 <- a3 + a1
- LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- FETCH(a1, 1) # a1 <- field byte offset
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- object we're operating on
- JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sw a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset@CCCC */
- GET_OPA4(a0) # a0 <- A(+)
- GET_OPB(a1) # a1 <- B
- GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
- # check object for null
- beqz a2, common_errNullObject # object was null
- EAS2(a3, rFP, a0) # a3 <- &fp[A]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
- FETCH(a3, 1) # a3 <- field byte offset
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu a2, a2, a3 # obj.field (64 bits, aligned) <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- /* For: iput-object-quick */
- /* op vA, vB, offset@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpIputObjectQuick)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualQuick)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualQuickRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lbu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lb a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lhu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lh a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokePolymorphic)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(4)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokePolymorphicRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(4)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeCustom)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeCustomRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstMethodHandle) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstMethodType) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
- .balign 128
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-#ifndef MIPS32REVGE6
-.Lop_float_to_long_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.Lop_float_to_long_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
-
-#ifndef MIPS32REVGE6
-.Lop_double_to_long_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.Lop_double_to_long_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
-
-.Lop_mul_long_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
-
-.Lop_shl_long_finish:
- SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
-
-.Lop_shr_long_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/VAA+1 <- rlo/rhi
-
-.Lop_ushr_long_finish:
- SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
-
-.Lop_shl_long_2addr_finish:
- SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
-
-.Lop_shr_long_2addr_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
-
-.Lop_ushr_long_2addr_finish:
- SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (0 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (1 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (2 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (3 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (4 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (5 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (6 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (7 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (8 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (9 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (10 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (11 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (12 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (13 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (14 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (15 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (16 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (17 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (18 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (19 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (20 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (21 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (22 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (23 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (24 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (25 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (26 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (27 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (28 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (29 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (30 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (31 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (32 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (33 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (34 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (35 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (36 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (37 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (38 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (39 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (40 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (41 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (42 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (43 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (44 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (45 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (46 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (47 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (48 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (49 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (50 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (51 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (52 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (53 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (54 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (55 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (56 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (57 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (58 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (59 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (60 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (61 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (62 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (63 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (64 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (65 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (66 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (67 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (68 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (69 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (70 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (71 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (72 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (73 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (74 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (75 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (76 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (77 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (78 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (79 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (80 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (81 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (82 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (83 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (84 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (85 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (86 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (87 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (88 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (89 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (90 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (91 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (92 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (93 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (94 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (95 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (96 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (97 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (98 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (99 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (100 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (101 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (102 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (103 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (104 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (105 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (106 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (107 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (108 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (109 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (110 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (111 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (112 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (113 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (114 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (115 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (116 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (117 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (118 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (119 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (120 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (121 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (122 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (123 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (124 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (125 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (126 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (127 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (128 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (129 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (130 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (131 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (132 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (133 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (134 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (135 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (136 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (137 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (138 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (139 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (140 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (141 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (142 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (143 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (144 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (145 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (146 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (147 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (148 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (149 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (150 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (151 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (152 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (153 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (154 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (155 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (156 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (157 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (158 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (159 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (160 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (161 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (162 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (163 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (164 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (165 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (166 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (167 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (168 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (169 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (170 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (171 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (172 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (173 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (174 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (175 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (176 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (177 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (178 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (179 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (180 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (181 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (182 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (183 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (184 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (185 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (186 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (187 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (188 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (189 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (190 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (191 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (192 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (193 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (194 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (195 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (196 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (197 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (198 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (199 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (200 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (201 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (202 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (203 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (204 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (205 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (206 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (207 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (208 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (209 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (210 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (211 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (212 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (213 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (214 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (215 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (216 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (217 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (218 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (219 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (220 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (221 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (222 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (223 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (224 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (225 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (226 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (227 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (228 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (229 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (230 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (231 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (232 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (233 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (234 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (235 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (236 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (237 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (238 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (239 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (240 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (241 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (242 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (243 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (244 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (245 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (246 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (247 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (248 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (249 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (250 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (251 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (252 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (253 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (254 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (255 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
- .balign 128
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogDivideByZeroException)
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogArrayIndexException)
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNegativeArraySizeException)
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNoSuchMethodException)
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNullObjectException)
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogExceptionThrownException)
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- lw a2, THREAD_FLAGS_OFFSET(rSELF)
- JAL(MterpLogSuspendFallback)
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqz a0, MterpFallback # If exception, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpHandleException) # (self, shadow_frame)
- beqz v0, MterpExceptionReturn # no local catch, back to caller.
- lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lw a1, OFF_FP_DEX_PC(rFP)
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
- EAS1(rPC, a0, a1) # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC()
- FETCH_INST()
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
- bgtz rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_osr_check
- blt rPROFILE, t0, .L_resume_backward_branch
- subu rPROFILE, 1
- beqz rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE()
- addu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnez ra, .L_suspend_request_pending
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC()
- move a0, rSELF
- JAL(MterpSuspendCheck) # (self)
- bnez v0, MterpFallback
- REFRESH_IBASE() # might have changed during suspend
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_no_count_backwards:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- bne rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- beq rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
- add a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- addu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- lw a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- JAL(MterpLogOSR)
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- move v0, zero # signal retry with reference interpreter.
- b MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-MterpReturn:
- lw a2, OFF_FP_RESULT_REGISTER(rFP)
- sw v0, 0(a2)
- sw v1, 4(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
- STACK_LOAD_FULL()
- jalr zero, ra
-
- .cfi_endproc
- .end ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
deleted file mode 100644
index fa07442..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ /dev/null
@@ -1,10963 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $0 /* always zero */
-#define AT $at /* assembler temp */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define a4 $8 /* expanded register arguments */
-#define a5 $9
-#define a6 $10
-#define a7 $11
-#define ta0 $8 /* alias */
-#define ta1 $9
-#define ta2 $10
-#define ta3 $11
-#define t0 $12 /* temp registers (not saved across subroutine calls) */
-#define t1 $13
-#define t2 $14
-#define t3 $15
-
-#define s0 $16 /* saved across subroutine calls (callee saved) */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* two more temp registers */
-#define t9 $25
-#define k0 $26 /* kernel temporary */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define s8 $30 /* one more callee saved */
-#define ra $31 /* return address */
-
-#define f0 $f0
-#define f1 $f1
-#define f2 $f2
-#define f3 $f3
-#define f12 $f12
-#define f13 $f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
- bgec \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
- bltc \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rINST first 16-bit code unit of current instruction
- s4 rIBASE interpreted instruction base pointer, used for computed goto
- s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s6 rPROFILE jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rINST s3
-#define rIBASE s4
-#define rREFS s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- sd rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- lhu rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- daddu rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- daddu rPC, rPC, \reg
- FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ADVANCE \count
- FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- lhu rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
- .set noat
- sll AT, \reg, 7
- daddu AT, rIBASE, AT
- jic AT, 0
- .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_U reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwu \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- lw AT, 4(AT)
- dinsu \reg, AT, 32, 32
- .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- lw AT, 4(AT)
- mthc1 AT, \reg
- .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- drotr32 \reg, \reg, 0
- sw \reg, 4(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- mfhc1 \vreg, \reg
- sw \vreg, 4(AT)
- .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE 80 /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
-#define LONG_MIN 0x8000000000000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
- .set reorder
-
- .text
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-ExecuteMterpImpl:
- .cfi_startproc
- .cpsetup t9, t8, ExecuteMterpImpl
-
- .cfi_def_cfa sp, 0
- daddu sp, sp, -STACK_SIZE
- .cfi_adjust_cfa_offset STACK_SIZE
-
- sd t8, STACK_OFFSET_GP(sp)
- .cfi_rel_offset 28, STACK_OFFSET_GP
- sd ra, STACK_OFFSET_RA(sp)
- .cfi_rel_offset 31, STACK_OFFSET_RA
-
- sd s0, STACK_OFFSET_S0(sp)
- .cfi_rel_offset 16, STACK_OFFSET_S0
- sd s1, STACK_OFFSET_S1(sp)
- .cfi_rel_offset 17, STACK_OFFSET_S1
- sd s2, STACK_OFFSET_S2(sp)
- .cfi_rel_offset 18, STACK_OFFSET_S2
- sd s3, STACK_OFFSET_S3(sp)
- .cfi_rel_offset 19, STACK_OFFSET_S3
- sd s4, STACK_OFFSET_S4(sp)
- .cfi_rel_offset 20, STACK_OFFSET_S4
- sd s5, STACK_OFFSET_S5(sp)
- .cfi_rel_offset 21, STACK_OFFSET_S5
- sd s6, STACK_OFFSET_S6(sp)
- .cfi_rel_offset 22, STACK_OFFSET_S6
-
- /* Remember the return register */
- sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
- lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- dlsa rREFS, v0, rFP, 2
- lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
- dlsa rPC, v0, a1, 1
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* Set up for backwards branches & osr profiling */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- jal MterpSetUpHotnessCountdown
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /* NOTE: no fallthrough */
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAAAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- ext a3, rINST, 12, 4 # a3 <- B
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG_WIDE a0, a3 # a0 <- vB
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAAAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAA <- result
- .else
- SET_VREG a0, a2 # vAA <- result
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* for: move-result-wide */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- ld a0, 0(a0) # a0 <- result.j
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAA <- result
- .else
- SET_VREG a0, a2 # vAA <- result
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- srl a2, rINST, 8 # a2 <- AA
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- SET_VREG_OBJECT a0, a2 # vAA <- exception obj
- GET_INST_OPCODE v0 # extract opcode from rINST
- sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
- /*
- * Return a 32-bit value.
- *
- * for: return (sign-extend), return-object (zero-extend)
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a2 # a0 <- vAA
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vAA
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
- /*
- * Return a 32-bit value.
- *
- * for: return (sign-extend), return-object (zero-extend)
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, #+B */
- ext a2, rINST, 8, 4 # a2 <- A
- seh a0, rINST # sign extend B in rINST
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- sra a0, a0, 12 # shift B into its final position
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- +B
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, #+BBBB0000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- sll a0, a0, 16 # a0 <- BBBB0000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB0000
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- srl a4, rINST, 8 # a4 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (low middle)
- lh a2, 6(rPC) # a2 <- hhhh (high middle)
- lh a3, 8(rPC) # a3 <- HHHH (high)
- FETCH_ADVANCE_INST 5 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- ins a2, a3, 16, 16 # a2 = HHHHhhhh
- dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- dsll32 a0, a0, 16 # a0 <- BBBB000000000000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, String//BBBBBBBB */
- .extern MterpConstString
- EXPORT_PC
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a4, 4(rPC) # a4 <- BBBB (high)
- srl a1, rINST, 8 # a1 <- AA
- ins a0, a4, 16, 16 # a0 <- BBBBbbbb
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 3 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- .extern artLockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artLockObjectFromCode
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- .extern artUnlockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- .extern MterpCheckCast
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpCheckCast # (index, &obj, method, self)
- PREFETCH_INST 2
- bnez v0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- .extern MterpInstanceOf
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- CCCC
- srl a1, rINST, 12 # a1 <- B
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpInstanceOf # (index, &obj, method, self)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a1, MterpException
- ADVANCE 2 # advance rPC
- SET_VREG v0, a2 # vA <- v0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
- /*
- * Return the length of an array.
- */
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a0, a1 # a0 <- vB (object ref)
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a3, a2 # vB <- length
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- .extern MterpNewInstance
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- jal MterpNewInstance # (shadow_frame, self, inst_data)
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- .extern MterpNewArray
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpNewArray
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rSELF
- jal MterpFilledNewArray
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rSELF
- jal MterpFilledNewArrayRange
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- .extern MterpFillArrayData
- EXPORT_PC
- lh a1, 2(rPC) # a1 <- bbbb (lo)
- lh a0, 4(rPC) # a0 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a1, a0, 16, 16 # a1 <- BBBBbbbb
- GET_VREG_U a0, a3 # a0 <- vAA (array object)
- dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
- jal MterpFillArrayData # (obj, payload)
- beqzc v0, MterpPossibleException # exception?
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (exception object)
- beqzc a0, common_errNullObject
- sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- srl rINST, rINST, 8
- seb rINST, rINST # rINST <- offset (sign-extended AA)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- lh rINST, 2(rPC) # rINST <- aaaa (low)
- lh a1, 4(rPC) # a1 <- AAAA (high)
- ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBBBBBB */
- .extern MterpDoPackedSwitch
- lh a0, 2(rPC) # a0 <- bbbb (lo)
- lh a1, 4(rPC) # a1 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a0, a1, 16, 16 # a0 <- BBBBbbbb
- GET_VREG a1, a3 # a1 <- vAA
- dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
- jal MterpDoPackedSwitch # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBBBBBB */
- .extern MterpDoSparseSwitch
- lh a0, 2(rPC) # a0 <- bbbb (lo)
- lh a1, 4(rPC) # a1 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a0, a1, 16, 16 # a0 <- BBBBbbbb
- GET_VREG a1, a3 # a1 <- vAA
- dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
- jal MterpDoSparseSwitch # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- cmp.eq.s f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.s f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- cmp.eq.s f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.s f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- cmp.eq.d f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.d f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- cmp.eq.d f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.d f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
- /* cmp-long vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- slt a2, a0, a1
- slt a0, a1, a0
- subu a0, a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- beqc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bnec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bltc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bgec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bgtc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- blec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- beqzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bnezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bltzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bgezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bgtzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- blezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 2
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
- dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- .extern artAGetObjectFromMterp
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- jal artAGetObjectFromMterp # (array, index)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a4, rINST, 8 # a4 <- AA
- PREFETCH_INST 2
- bnez a1, MterpException
- SET_VREG_OBJECT v0, a4 # vAA <- v0
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 2
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- GET_VREG_WIDE a2, a4 # a2 <- vAA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- dsrl32 a2, a2, 0
- sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- .extern MterpAputObject
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpAputObject
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-TODO
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtual
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeSuper
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeDirect
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeStatic
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeInterface
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- .extern MterpSuspendCheck
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeSuperRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeDirectRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeStaticRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeInterfaceRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- subu a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- nor a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * For: not-long, neg-long
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- dsubu a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * For: not-long, neg-long
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- nor a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- neg.s f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- neg.d f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /* int-to-long vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.w f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.w f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.l f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.l f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.w.s f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.l.s f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.s f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.w.d f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.l.d f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.d f0, f0
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- seb a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- and a0, a0, 0xffff # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- seh a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- daddu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dmul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dmod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- add.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- sub.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- mul.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- div.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
- /* rem-float vAA, vBB, vCC */
- .extern fmodf
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f12, a2 # f12 <- vBB
- GET_VREG_FLOAT f13, a3 # f13 <- vCC
- jal fmodf # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- add.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- sub.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- mul.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- div.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
- /* rem-double vAA, vBB, vCC */
- .extern fmod
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f12, a2 # f12 <- vBB
- GET_VREG_DOUBLE f13, a3 # f13 <- vCC
- jal fmod # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- daddu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dmul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dmod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- add.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- sub.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- mul.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- div.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
- /* rem-float/2addr vA, vB */
- .extern fmodf
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f12, a2 # f12 <- vA
- GET_VREG_FLOAT f13, a3 # f13 <- vB
- jal fmodf # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- add.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- sub.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- mul.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- div.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
- /* rem-double/2addr vA, vB */
- .extern fmod
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f12, a2 # f12 <- vA
- GET_VREG_DOUBLE f13, a3 # f13 <- vB
- jal fmod # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lw a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* iget-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a4, 2(rPC) # a4 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- daddu a4, a3, a4 # create direct pointer
- lw a0, 0(a4)
- lw a1, 4(a4)
- dinsu a0, a1, 32, 32
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG_WIDE a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- .extern artIGetObjectFromMterp
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- object we're operating on
- jal artIGetObjectFromMterp # (obj, offset)
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sw a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a3, 2(rPC) # a3 <- field byte offset
- GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
- ext a0, rINST, 8, 4 # a0 <- A
- beqz a2, common_errNullObject # object was null
- GET_VREG_WIDE a0, a0 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a2, a3 # create a direct pointer
- sw a0, 0(a1)
- dsrl32 a0, a0, 0
- sw a0, 4(a1)
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- .extern MterpIputObjectQuick
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpIputObjectQuick
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualQuick
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualQuickRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sb a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sb a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sh a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sh a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lbu a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lb a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lhu a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lh a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokePolymorphic
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 4
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokePolymorphicRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 4
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeCustom
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeCustomRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstMethodHandle # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstMethodType # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
- .balign 128
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (0 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (1 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (2 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (3 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (4 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (5 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (6 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (7 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (8 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (9 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (10 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (11 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (12 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (13 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (14 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (15 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (16 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (17 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (18 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (19 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (20 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (21 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (22 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (23 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (24 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (25 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (26 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (27 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (28 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (29 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (30 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (31 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (32 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (33 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (34 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (35 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (36 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (37 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (38 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (39 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (40 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (41 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (42 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (43 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (44 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (45 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (46 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (47 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (48 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (49 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (50 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (51 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (52 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (53 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (54 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (55 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (56 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (57 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (58 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (59 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (60 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (61 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (62 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (63 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (64 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (65 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (66 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (67 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (68 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (69 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (70 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (71 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (72 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (73 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (74 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (75 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (76 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (77 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (78 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (79 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (80 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (81 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (82 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (83 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (84 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (85 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (86 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (87 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (88 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (89 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (90 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (91 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (92 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (93 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (94 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (95 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (96 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (97 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (98 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (99 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (100 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (101 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (102 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (103 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (104 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (105 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (106 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (107 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (108 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (109 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (110 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (111 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (112 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (113 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (114 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (115 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (116 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (117 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (118 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (119 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (120 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (121 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (122 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (123 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (124 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (125 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (126 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (127 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (128 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (129 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (130 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (131 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (132 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (133 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (134 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (135 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (136 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (137 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (138 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (139 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (140 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (141 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (142 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (143 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (144 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (145 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (146 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (147 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (148 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (149 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (150 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (151 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (152 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (153 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (154 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (155 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (156 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (157 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (158 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (159 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (160 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (161 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (162 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (163 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (164 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (165 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (166 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (167 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (168 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (169 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (170 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (171 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (172 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (173 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (174 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (175 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (176 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (177 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (178 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (179 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (180 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (181 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (182 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (183 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (184 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (185 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (186 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (187 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (188 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (189 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (190 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (191 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (192 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (193 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (194 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (195 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (196 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (197 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (198 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (199 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (200 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (201 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (202 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (203 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (204 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (205 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (206 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (207 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (208 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (209 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (210 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (211 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (212 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (213 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (214 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (215 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (216 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (217 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (218 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (219 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (220 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (221 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (222 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (223 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (224 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (225 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (226 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (227 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (228 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (229 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (230 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (231 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (232 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (233 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (234 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (235 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (236 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (237 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (238 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (239 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (240 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (241 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (242 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (243 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (244 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (245 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (246 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (247 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (248 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (249 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (250 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (251 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (252 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (253 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (254 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (255 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
- .balign 128
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
- .extern MterpLogDivideByZeroException
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogArrayIndexException
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogNullObjectException
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqzc a0, MterpFallback # If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
- .extern MterpHandleException
- .extern MterpShouldSwitchInterpreters
-MterpException:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpHandleException # (self, shadow_frame)
- beqzc v0, MterpExceptionReturn # no local catch, back to caller.
- ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lwu a1, OFF_FP_DEX_PC(rFP)
- REFRESH_IBASE
- dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- bgtzc rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
- li v0, JIT_CHECK_OSR
- beqc rPROFILE, v0, .L_osr_check
- bltc rPROFILE, v0, .L_resume_backward_branch
- dsubu rPROFILE, 1
- beqzc rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnezc ra, .L_suspend_request_pending
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- move a0, rSELF
- jal MterpSuspendCheck # (self)
- bnezc v0, MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_no_count_backwards:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- bnec rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- beqc rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- ld a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST # rINST contains offset
- jal MterpLogOSR
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
- .extern MterpLogFallback
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogFallback
-#endif
-MterpCommonFallback:
- li v0, 0 # signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA. Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
- ld a2, OFF_FP_RESULT_REGISTER(rFP)
- sd a0, 0(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
-.L_pop_and_return:
- ld s6, STACK_OFFSET_S6(sp)
- .cfi_restore 22
- ld s5, STACK_OFFSET_S5(sp)
- .cfi_restore 21
- ld s4, STACK_OFFSET_S4(sp)
- .cfi_restore 20
- ld s3, STACK_OFFSET_S3(sp)
- .cfi_restore 19
- ld s2, STACK_OFFSET_S2(sp)
- .cfi_restore 18
- ld s1, STACK_OFFSET_S1(sp)
- .cfi_restore 17
- ld s0, STACK_OFFSET_S0(sp)
- .cfi_restore 16
-
- ld ra, STACK_OFFSET_RA(sp)
- .cfi_restore 31
-
- ld t8, STACK_OFFSET_GP(sp)
- .cpreturn
- .cfi_restore 28
-
- .set noreorder
- jr ra
- daddu sp, sp, STACK_SIZE
- .cfi_adjust_cfa_offset -STACK_SIZE
-
- .cfi_endproc
- .set reorder
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
deleted file mode 100644
index e2793ba..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ /dev/null
@@ -1,11889 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
- eax, edx, ecx, st(0)-st(7)
-Callee save set:
- ebx, esi, edi, ebp
-Return regs:
- 32-bit in eax
- 64-bit in edx:eax (low-order 32 in eax)
- fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left. On entry to target, first
-parm is at 4(%esp). Traditional entry code is:
-
-functEntry:
- push %ebp # save old frame pointer
- mov %ebp,%esp # establish new frame pointer
- sub FrameSize,%esp # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPC esi interpreted program counter, used for fetching instructions
- rFP edi interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE edx base of instruction handler table
- rREFS ebp base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushl \_reg
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popl \_reg
- .cfi_adjust_cfa_offset -4
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address + 4 * 4 for spills
- */
-#define FRAME_SIZE 28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16 + 16)
-#define IN_ARG2 (FRAME_SIZE + 16 + 12)
-#define IN_ARG1 (FRAME_SIZE + 16 + 8)
-#define IN_ARG0 (FRAME_SIZE + 16 + 4)
-/* Spill offsets relative to %esp */
-#define LOCAL0 (FRAME_SIZE - 4)
-#define LOCAL1 (FRAME_SIZE - 8)
-#define LOCAL2 (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3 ( 12)
-#define OUT_ARG2 ( 8)
-#define OUT_ARG1 ( 4)
-#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF IN_ARG0(%esp)
-#define rPC %esi
-#define CFI_DEX 6 // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP 0 // DWARF register number of the first argument register (eax).
-#define rFP %edi
-#define rINST %ebx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %edx
-#define rREFS %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
- movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwl (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(7), %eax
- addl rIBASE, %eax
- jmp *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leal 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- pxor \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa esp, 4
-
- /* Spill callee save regs */
- PUSH %ebp
- PUSH %edi
- PUSH %esi
- PUSH %ebx
-
- /* Allocate frame */
- subl $FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Load ShadowFrame pointer */
- movl IN_ARG2(%esp), %edx
-
- /* Remember the return register */
- movl IN_ARG3(%esp), %eax
- movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
- /* Remember the code_item */
- movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
- leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
- leal (rFP, %eax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea (%ecx, %eax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Set up for backwards branches & osr profiling */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpSetUpHotnessCountdown)
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 4(rPC), %ecx # ecx<- BBBB
- movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* move-result-wide vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl 4(%eax), %ecx # Get high
- movl (%eax), %eax # Get low
- SET_VREG %eax, rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if 1
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- movl rSELF, %ecx
- movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
- movl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, #+B */
- movsx rINSTbl, %eax # eax <-ssssssBx
- movl $0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $4, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINST # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINST # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINST # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, #+BBBB */
- movswl 2(rPC), %eax # eax <- ssssBBBB
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # eax <- BBBBbbbb
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movl 2(rPC), %eax # eax <- lsw
- movzbl rINSTbl, %ecx # ecx <- AA
- movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax, %ecx
- SET_VREG_HIGH rINST, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- xorl %eax, %eax
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artLockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- leal VREG_ADDRESS(rINST), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, %eax # eax <- BA
- sarl $4, %eax # eax <- B
- leal VREG_ADDRESS(%eax), %ecx # Get object address
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- andb $0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/*
- * Return the length of an array.
- */
- mov rINST, %eax # eax <- BA
- sarl $4, rINST # rINST <- B
- GET_VREG %ecx, rINST # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST, %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG1(%esp)
- REFRESH_INST 34
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpNewInstance)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 35
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpNewArray)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL(MterpFilledNewArray)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL(MterpFilledNewArrayRange)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax, rINST # eax <- vAA (array object)
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINST # eax<- vAA (exception object)
- testl %eax, %eax
- jz common_errNullObject
- movl rSELF,%ecx
- movl %eax, THREAD_EXCEPTION_OFFSET(%ecx)
- jmp MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbl rINSTbl, rINST # rINST <- ssssssAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswl 2(rPC), rINST # rINST <- ssssAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movl 2(rPC), rINST # rINST <- AAAAAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL(MterpDoPackedSwitch)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL(MterpDoSparseSwitch)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movss VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpl_float_nan_is_neg
- je .Lop_cmpl_float_finish
- jb .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
- incl %eax
- jmp .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
- decl %eax
-.Lop_cmpl_float_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movss VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpg_float_nan_is_pos
- je .Lop_cmpg_float_finish
- jb .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
- incl %eax
- jmp .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
- decl %eax
-.Lop_cmpg_float_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movsd VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpl_double_nan_is_neg
- je .Lop_cmpl_double_finish
- jb .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
- incl %eax
- jmp .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
- decl %eax
-.Lop_cmpl_double_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movsd VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpg_double_nan_is_pos
- je .Lop_cmpg_double_finish
- jb .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
- incl %eax
- jmp .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
- decl %eax
-.Lop_cmpg_double_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
- cmpl VREG_HIGH_ADDRESS(%ecx), %eax
- jl .Lop_cmp_long_smaller
- jg .Lop_cmp_long_bigger
- movzbl 2(rPC), %eax # eax <- BB, restore BB
- GET_VREG %eax, %eax # eax <- v[BB]
- sub VREG_ADDRESS(%ecx), %eax
- ja .Lop_cmp_long_bigger
- jb .Lop_cmp_long_smaller
-.Lop_cmp_long_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_cmp_long_bigger:
- movl $1, %eax
- jmp .Lop_cmp_long_finish
-
-.Lop_cmp_long_smaller:
- movl $-1, %eax
- jmp .Lop_cmp_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jne 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- je 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jge 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jl 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jle 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jg 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jne 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- je 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jge 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jl 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jle 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jg 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movl MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/*
- * Array get, 64 bits. vAA <- vBB[vCC].
- */
- /* aget-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecs <- vCC (requested index)
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- GET_VREG rINST, rINST
- movl rINST, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
- movq %xmm0, (%eax) # vBB[vCC] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 77
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpAputObject) # (array, index)
- RESTORE_IBASE
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST, rINST
- movb rINSTbl, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST, rINST
- movb rINSTbl, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST, rINST
- movw rINSTw, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST, rINST
- movw rINSTw, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU32
- REFRESH_INST 82 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetU32)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU64
- REFRESH_INST 83 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetU64)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetObj
- REFRESH_INST 84 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetObj)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU8
- REFRESH_INST 85 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetU8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI8
- REFRESH_INST 86 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetI8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU16
- REFRESH_INST 87 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetU16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI16
- REFRESH_INST 88 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIGetI16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU32
- REFRESH_INST 89 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutU32)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU64
- REFRESH_INST 90 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutObj
- REFRESH_INST 91 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU8
- REFRESH_INST 92 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutU8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI8
- REFRESH_INST 93 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutI8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU16
- REFRESH_INST 94 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutU16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI16
- REFRESH_INST 95 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpIPutI16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU32
- REFRESH_INST 96 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetU32)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU64
- REFRESH_INST 97 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetU64)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetObj
- REFRESH_INST 98 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetObj)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU8
- REFRESH_INST 99 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetU8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI8
- REFRESH_INST 100 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetI8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU16
- REFRESH_INST 101 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetU16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI16
- REFRESH_INST 102 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSGetI16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU32
- REFRESH_INST 103 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutU32)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU64
- REFRESH_INST 104 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutObj
- REFRESH_INST 105 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU8
- REFRESH_INST 106 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutU8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI8
- REFRESH_INST 107 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutI8)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU16
- REFRESH_INST 108 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutU16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI16
- REFRESH_INST 109 # fix rINST to include opcode
- movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
- movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
- call SYMBOL(MterpSPutI16)
- testb %al, %al
- jz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 110
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtual)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 111
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeSuper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 112
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeDirect)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 113
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeStatic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 114
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeInterface)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 116
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 117
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeSuperRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 118
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeDirectRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 119
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeStaticRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 120
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeInterfaceRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- negl %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- notl %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- negl %eax
- adcl $0, %ecx
- negl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- notl %eax
- notl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
- fchs
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
- fchs
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /* int to long vA, vB */
- movzbl rINSTbl, %eax # eax <- +A
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movl rIBASE, %ecx # cltd trashes rIBASE/edx
- cltd # rINST:eax<- sssssssBBBBBBBB
- SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
- SET_VREG %eax, rINST # v[A+0] <- %eax
- movl %ecx, rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildll VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildll VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 0
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 0
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 0
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_float_to_int_special_case # fix up result
-
-.Lop_float_to_int_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 0
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_int_special_case:
- fnstsw %ax
- sahf
- jp .Lop_float_to_int_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 0
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_int_finish
-.Lop_float_to_int_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 0
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_int_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 0
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 1
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 1
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_float_to_long_special_case # fix up result
-
-.Lop_float_to_long_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 1
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_long_special_case:
- fnstsw %ax
- sahf
- jp .Lop_float_to_long_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 1
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_long_finish
-.Lop_float_to_long_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 1
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 1
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 0
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 0
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_double_to_int_special_case # fix up result
-
-.Lop_double_to_int_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 0
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_int_special_case:
- fnstsw %ax
- sahf
- jp .Lop_double_to_int_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 0
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_int_finish
-.Lop_double_to_int_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 0
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_int_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 1
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 1
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 1
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_double_to_long_special_case # fix up result
-
-.Lop_double_to_long_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 1
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_long_special_case:
- fnstsw %ax
- sahf
- jp .Lop_double_to_long_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 1
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_long_finish
-.Lop_double_to_long_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 1
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movsbl %al, %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movzwl %ax,%eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movswl %ax, %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- addl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- subl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
- /*
- * 32-bit binary multiplication.
- */
- /* mul vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- mov rIBASE, LOCAL0(%esp)
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- mov LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- mov rIBASE, LOCAL0(%esp)
- testl %ecx, %ecx
- je common_errDivideByZero
- movl %eax, %edx
- orl %ecx, %edx
- testl $0xFFFFFF00, %edx # If both arguments are less
- # than 8-bit and +ve
- jz .Lop_div_int_8 # Do 8-bit divide
- testl $0xFFFF0000, %edx # If both arguments are less
- # than 16-bit and +ve
- jz .Lop_div_int_16 # Do 16-bit divide
- cmpl $-1, %ecx
- jne .Lop_div_int_32
- cmpl $0x80000000, %eax
- jne .Lop_div_int_32
- movl $0x80000000, %eax
- jmp .Lop_div_int_finish
-.Lop_div_int_32:
- cltd
- idivl %ecx
- jmp .Lop_div_int_finish
-.Lop_div_int_8:
- div %cl # 8-bit divide otherwise.
- # Remainder in %ah, quotient in %al
- .if 0
- movl %eax, %edx
- shr $8, %edx
- .else
- andl $0x000000FF, %eax
- .endif
- jmp .Lop_div_int_finish
-.Lop_div_int_16:
- xorl %edx, %edx # Clear %edx before divide
- div %cx
-.Lop_div_int_finish:
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- mov rIBASE, LOCAL0(%esp)
- testl %ecx, %ecx
- je common_errDivideByZero
- movl %eax, %edx
- orl %ecx, %edx
- testl $0xFFFFFF00, %edx # If both arguments are less
- # than 8-bit and +ve
- jz .Lop_rem_int_8 # Do 8-bit divide
- testl $0xFFFF0000, %edx # If both arguments are less
- # than 16-bit and +ve
- jz .Lop_rem_int_16 # Do 16-bit divide
- cmpl $-1, %ecx
- jne .Lop_rem_int_32
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_32
- movl $0, rIBASE
- jmp .Lop_rem_int_finish
-.Lop_rem_int_32:
- cltd
- idivl %ecx
- jmp .Lop_rem_int_finish
-.Lop_rem_int_8:
- div %cl # 8-bit divide otherwise.
- # Remainder in %ah, quotient in %al
- .if 1
- movl %eax, %edx
- shr $8, %edx
- .else
- andl $0x000000FF, %eax
- .endif
- jmp .Lop_rem_int_finish
-.Lop_rem_int_16:
- xorl %edx, %edx # Clear %edx before divide
- div %cx
-.Lop_rem_int_finish:
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- andl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- orl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- xorl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # eax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # eax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # eax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- addl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- adcl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- subl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- sbbl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/*
- * Signed 64-bit integer multiply.
- *
- * We could definately use more free registers for
- * this code. We spill rINSTw (ebx),
- * giving us eax, ebc, ecx and edx as computational
- * temps. On top of that, we'll spill edi (rFP)
- * for use as the vB pointer and esi (rPC) for use
- * as the vC pointer. Yuck.
- *
- */
- /* mul-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- B
- movzbl 3(rPC), %ecx # ecx <- C
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[B]
- leal (rFP,%ecx,4), rFP # rFP <- &v[C]
- movl 4(%esi), %ecx # ecx <- Bmsw
- imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
- movl 4(rFP), %eax # eax <- Cmsw
- imull (%esi), %eax # eax <- (Cmsw*Blsw)
- addl %eax, %ecx # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
- movl (rFP), %eax # eax <- Clsw
- mull (%esi) # eax <- (Clsw*Alsw)
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL1(%esp), rFP # restore FP
- leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
- SET_VREG_HIGH rIBASE, rINST # v[B+1] <- rIBASE
- mov LOCAL2(%esp), rIBASE # restore IBASE
- SET_VREG %eax, rINST # v[B] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* art_quick_* methods has quick abi,
- * so use eax, ecx, edx, ebx for args
- */
- /* div vAA, vBB, vCC */
- .extern art_quick_ldiv
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %ecx, %edx
- orl %ebx, %ecx
- jz common_errDivideByZero
- movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx, %eax
- GET_VREG %eax, %eax
- call SYMBOL(art_quick_ldiv)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* art_quick_* methods has quick abi,
- * so use eax, ecx, edx, ebx for args
- */
- /* div vAA, vBB, vCC */
- .extern art_quick_lmod
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %ecx, %edx
- orl %ebx, %ecx
- jz common_errDivideByZero
- movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx, %eax
- GET_VREG %eax, %eax
- call SYMBOL(art_quick_lmod)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- andl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- andl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- orl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- orl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- xorl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- xorl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shl-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rINST */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # ecx <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shldl %eax,rIBASE
- sall %cl, %eax
- testb $32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[BB+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- addss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- subss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- mulss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- divss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
- /* rem_float vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- BB
- movzbl 2(rPC), %eax # eax <- CC
- flds VREG_ADDRESS(%ecx) # vBB to fp stack
- flds VREG_ADDRESS(%eax) # vCC to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINST) # %st to vAA
- CLEAR_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- addsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- subsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- mulsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- divsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
- /* rem_double vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- BB
- movzbl 2(rPC), %eax # eax <- CC
- fldl VREG_ADDRESS(%ecx) # %st1 <- fp[vBB]
- fldl VREG_ADDRESS(%eax) # %st0 <- fp[vCC]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINST) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- addl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- subl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
- /* mul vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- movl rIBASE, rINST
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- movl rINST, rIBASE
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- mov rIBASE, LOCAL0(%esp)
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_div_int_2addr_continue_div2addr
- cmpl $0x80000000, %eax
- jne .Lop_div_int_2addr_continue_div2addr
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_div_int_2addr_continue_div2addr:
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- mov rIBASE, LOCAL0(%esp)
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_rem_int_2addr_continue_div2addr
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_2addr_continue_div2addr
- movl $0, rIBASE
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_rem_int_2addr_continue_div2addr:
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- andl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- orl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- xorl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- addl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- adcl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- subl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- sbbl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/*
- * Signed 64-bit integer multiply, 2-addr version
- *
- * We could definately use more free registers for
- * this code. We must spill %edx (rIBASE) because it
- * is used by imul. We'll also spill rINST (ebx),
- * giving us eax, ebc, ecx and rIBASE as computational
- * temps. On top of that, we'll spill %esi (edi)
- * for use as the vA pointer and rFP (esi) for use
- * as the vB pointer. Yuck.
- */
- /* mul-long/2addr vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- CLEAR_WIDE_REF %eax # clear refs in advance
- sarl $4, rINST # rINST <- B
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[A]
- leal (rFP,rINST,4), rFP # rFP <- &v[B]
- movl 4(%esi), %ecx # ecx <- Amsw
- imull (rFP), %ecx # ecx <- (Amsw*Blsw)
- movl 4(rFP), %eax # eax <- Bmsw
- imull (%esi), %eax # eax <- (Bmsw*Alsw)
- addl %eax, %ecx # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
- movl (rFP), %eax # eax <- Blsw
- mull (%esi) # eax <- (Blsw*Alsw)
- leal (%ecx,rIBASE), rIBASE # full result now in %edx:%eax
- movl rIBASE, 4(%esi) # v[A+1] <- rIBASE
- movl %eax, (%esi) # v[A] <- %eax
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL2(%esp), rIBASE # restore IBASE
- mov LOCAL1(%esp), rFP # restore FP
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* art_quick_* methods has quick abi,
- * so use eax, ecx, edx, ebx for args
- */
- /* div/2addr vA, vB */
- .extern art_quick_ldiv
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- movzbl rINSTbl, %eax
- shrl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movl %ebx, %ecx
- GET_VREG %edx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %edx, %eax
- orl %ebx, %eax
- jz common_errDivideByZero
- GET_VREG %eax, %ecx
- GET_VREG_HIGH %ecx, %ecx
- call SYMBOL(art_quick_ldiv)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* art_quick_* methods has quick abi,
- * so use eax, ecx, edx, ebx for args
- */
- /* div/2addr vA, vB */
- .extern art_quick_lmod
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- movzbl rINSTbl, %eax
- shrl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movl %ebx, %ecx
- GET_VREG %edx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %edx, %eax
- orl %ebx, %eax
- jz common_errDivideByZero
- GET_VREG %eax, %ecx
- GET_VREG_HIGH %ecx, %ecx
- call SYMBOL(art_quick_lmod)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- andl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- andl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- orl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- orl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- xorl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- xorl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shldl %eax, rIBASE
- sall %cl, %eax
- testb $32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
- /* rem_float/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
- /* rem_double/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_WIDE_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- addl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- subl %eax, %ecx # for example: addl %ecx, %eax
- SET_VREG %ecx, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
- /* mul/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movl rIBASE, %ecx
- movswl 2(rPC), rIBASE # rIBASE <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_div_int_lit16_continue_div
- cmpl $0x80000000, %eax
- jne .Lop_div_int_lit16_continue_div
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit16_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_rem_int_lit16_continue_div
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_lit16_continue_div
- movl $0, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit16_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- andl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- orl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- xorl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- addl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- subl %eax, %ecx # ex: addl %ecx,%eax
- SET_VREG %ecx, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
- /* mul/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movl rIBASE, %ecx
- GET_VREG %eax, %eax # eax <- rBB
- movsbl 3(rPC), rIBASE # rIBASE <- ssssssCC
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $0x80000000, %eax
- jne .Lop_div_int_lit8_continue_div
- cmpl $-1, %ecx
- jne .Lop_div_int_lit8_continue_div
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit8_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_lit8_continue_div
- cmpl $-1, %ecx
- jne .Lop_rem_int_lit8_continue_div
- movl $0, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit8_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- andl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- orl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- xorl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* iget-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movq (%ecx,%eax,1), %xmm0
- andb $0xf, rINSTbl # rINST <- A
- SET_WIDE_FP_VREG %xmm0, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG1(%esp)
- EXPORT_PC
- call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movl rINST, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwl 2(rPC), %eax # eax<- field byte offset
- leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
- andb $0xf, rINSTbl # rINST<- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
- movq %xmm0, (%ecx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 232
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 233
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 234
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualQuickRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movb rINSTbl, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movb rINSTbl, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movw rINSTw, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movw rINSTw, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movsbl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movsbl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movswl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 250
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokePolymorphic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 251
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokePolymorphicRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 252
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeCustom)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 253
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeCustomRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
- .balign 128
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(0*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(1*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(2*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(3*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(4*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(5*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(6*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(7*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(8*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(9*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(10*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(11*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(12*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(13*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(14*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(15*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(16*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(17*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(18*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(19*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(20*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(21*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(22*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(23*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(24*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(25*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(26*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(27*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(28*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(29*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(30*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(31*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(32*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(33*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(34*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(35*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(36*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(37*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(38*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(39*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(40*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(41*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(42*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(43*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(44*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(45*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(46*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(47*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(48*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(49*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(50*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(51*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(52*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(53*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(54*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(55*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(56*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(57*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(58*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(59*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(60*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(61*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(62*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(63*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(64*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(65*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(66*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(67*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(68*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(69*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(70*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(71*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(72*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(73*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(74*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(75*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(76*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(77*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(78*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(79*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(80*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(81*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(82*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(83*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(84*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(85*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(86*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(87*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(88*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(89*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(90*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(91*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(92*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(93*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(94*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(95*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(96*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(97*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(98*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(99*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(100*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(101*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(102*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(103*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(104*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(105*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(106*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(107*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(108*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(109*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(110*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(111*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(112*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(113*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(114*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(115*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(116*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(117*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(118*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(119*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(120*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(121*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(122*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(123*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(124*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(125*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(126*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(127*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(128*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(129*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(130*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(131*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(132*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(133*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(134*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(135*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(136*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(137*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(138*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(139*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(140*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(141*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(142*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(143*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(144*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(145*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(146*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(147*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(148*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(149*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(150*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(151*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(152*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(153*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(154*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(155*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(156*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(157*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(158*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(159*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(160*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(161*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(162*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(163*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(164*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(165*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(166*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(167*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(168*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(169*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(170*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(171*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(172*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(173*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(174*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(175*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(176*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(177*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(178*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(179*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(180*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(181*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(182*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(183*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(184*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(185*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(186*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(187*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(188*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(189*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(190*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(191*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(192*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(193*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(194*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(195*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(196*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(197*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(198*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(199*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(200*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(201*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(202*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(203*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(204*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(205*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(206*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(207*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(208*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(209*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(210*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(211*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(212*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(213*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(214*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(215*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(216*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(217*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(218*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(219*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(220*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(221*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(222*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(223*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(224*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(225*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(226*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(227*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(228*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(229*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(230*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(231*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(232*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(233*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(234*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(235*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(236*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(237*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(238*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(239*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(240*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(241*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(242*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(243*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(244*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(245*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(246*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(247*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(248*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(249*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(250*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(251*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(252*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(253*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(254*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(255*128)
-
- .balign 128
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- movl THREAD_FLAGS_OFFSET(%eax), %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movl rSELF, %eax
- testl $-1, THREAD_EXCEPTION_OFFSET(%eax)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
- movl OFF_FP_DEX_PC(rFP), %ecx
- lea (%eax, %ecx, 2), rPC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decw rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- REFRESH_IBASE
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp) # rSELF in eax
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpw $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpw $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl $2, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpLogOSR)
-#endif
- movl $1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xor %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $1, %eax
- jmp MterpDone
-MterpReturn:
- movl OFF_FP_RESULT_REGISTER(rFP), %edx
- movl %eax, (%edx)
- movl %ecx, 4(%edx)
- mov $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmpw $0, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addl $FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %ebx
- POP %esi
- POP %edi
- POP %ebp
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
deleted file mode 100644
index 96f5204..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ /dev/null
@@ -1,11040 +0,0 @@
-/* DO NOT EDIT: This file was generated by gen-mterp.py. */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
- rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
- rbx, rbp, r12-r15
-Return regs:
- 32-bit in eax
- 64-bit in rax
- fp on xmm0
-
-First 8 fp parameters came in xmm0-xmm7.
-First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters passed on stack, pushed right-to-left. On entry to target, first
-param is at 8(%esp). Traditional entry code is:
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPROFILE rbp countdown register for jit profiling
- rPC r12 interpreted program counter, used for fetching instructions
- rFP r13 interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE r14 base of instruction handler table
- rREFS r15 base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushq \_reg
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popq \_reg
- .cfi_adjust_cfa_offset -8
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 8 bytes for return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE 8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 %rcx
-#define IN_ARG2 %rdx
-#define IN_ARG1 %rsi
-#define IN_ARG0 %rdi
-/* Spill offsets relative to %esp */
-#define SELF_SPILL (FRAME_SIZE - 8)
-/* Out Args */
-#define OUT_ARG3 %rcx
-#define OUT_ARG2 %rdx
-#define OUT_ARG1 %rsi
-#define OUT_ARG0 %rdi
-#define OUT_32_ARG3 %ecx
-#define OUT_32_ARG2 %edx
-#define OUT_32_ARG1 %esi
-#define OUT_32_ARG0 %edi
-#define OUT_FP_ARG1 %xmm1
-#define OUT_FP_ARG0 %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF SELF_SPILL(%rsp)
-#define rPC %r12
-#define CFI_DEX 12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP 5 // DWARF register number of the first argument register (rdi).
-#define rFP %r13
-#define rINST %ebx
-#define rINSTq %rbx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %r14
-#define rREFS %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
- movq THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
- movq rSELF, rIBASE
- REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb $\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwq (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(7), %eax
- addq rIBASE, %rax
- jmp *%rax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leaq 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- xorq \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa rsp, 8
-
- /* Spill callee save regs */
- PUSH %rbx
- PUSH %rbp
- PUSH %r12
- PUSH %r13
- PUSH %r14
- PUSH %r15
-
- /* Allocate frame */
- subq $FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Remember the return register */
- movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
- /* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
- leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
- leaq (rFP, %rax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq (IN_ARG1, %rax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- movq IN_ARG0, rSELF
- REFRESH_IBASE_REG IN_ARG0
-
- /* Set up for backwards branches & osr profiling */
- movq IN_ARG0, OUT_ARG2 /* Set up OUT_ARG2 before clobbering IN_ARG0 */
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpSetUpHotnessCountdown)
- movswl %ax, rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if 0
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movl rINST, %ecx # ecx <- BA
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwq 4(rPC), %rcx # ecx<- BBBB
- movzwq 2(rPC), %rax # eax<- AAAA
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 1
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if 1
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if 1
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
- /* move-result-wide vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movq (%rax), %rdx # Get wide
- SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if 1
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
- /* move-exception vAA */
- movq rSELF, %rcx
- movl THREAD_EXCEPTION_OFFSET(%rcx), %eax
- SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
- movl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
- /* const/4 vA, #+B */
- movsbl rINSTbl, %eax # eax <-ssssssBx
- movl $0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $4, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINSTq # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINSTq # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
- /* const-wide/16 vAA, #+BBBB */
- movswq 2(rPC), %rax # rax <- ssssBBBB
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
- /* const-wide/32 vAA, #+BBBBbbbb */
- movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwq 2(rPC), %rax # eax <- 0000BBBB
- salq $48, %rax # eax <- BBBB0000
- SET_WIDE_VREG %rax, rINSTq # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBB
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artLockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
- movl rINST, %eax # eax <- BA
- sarl $4, %eax # eax <- B
- leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movsbl %al, %eax
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- andb $0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/*
- * Return the length of an array.
- */
- movl rINST, %eax # eax <- BA
- sarl $4, rINST # rINST <- B
- GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
- SET_VREG rINST, %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rSELF, OUT_ARG1
- REFRESH_INST 34
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpNewInstance)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 35
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpFilledNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpFilledNewArrayRange)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movslq 2(rPC), %rcx # rcx <- ssssssssBBBBbbbb
- leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINSTq # eax<- vAA (exception object)
- testb %al, %al
- jz common_errNullObject
- movq rSELF, %rcx
- movq %rax, THREAD_EXCEPTION_OFFSET(%rcx)
- jmp MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Because we need the SF bit set, we'll use an adds
- * to convert from Dalvik offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL(MterpDoPackedSwitch)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL(MterpDoSparseSwitch)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movss VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpl_float_nan_is_neg
- je .Lop_cmpl_float_finish
- jb .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
- movl $-1, %eax
-.Lop_cmpl_float_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movss VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpg_float_nan_is_pos
- je .Lop_cmpg_float_finish
- jb .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
- movl $-1, %eax
-.Lop_cmpg_float_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movsd VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpl_double_nan_is_neg
- je .Lop_cmpl_double_finish
- jb .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
- movl $-1, %eax
-.Lop_cmpl_double_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movsd VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpg_double_nan_is_pos
- je .Lop_cmpg_double_finish
- jb .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
- movl $-1, %eax
-.Lop_cmpg_double_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbq 2(rPC), %rdx # edx <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
- xorl %eax, %eax
- xorl %edi, %edi
- addb $1, %al
- movl $-1, %esi
- cmpq VREG_ADDRESS(%rcx), %rdx
- cmovl %esi, %edi
- cmovg %eax, %edi
- SET_VREG %edi, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jne 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- je 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jge 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jl 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jle 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jg 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jne 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- je 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jge 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jl 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jle 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jg 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movl MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 1
- movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
- GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
- EXPORT_PC
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movl rINST, MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 1
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movq rINSTq, MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 77
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpAputObject) # (array, index)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movb rINSTbl, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movb rINSTbl, MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movw rINSTw, MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movw rINSTw, MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU32
- REFRESH_INST 82 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetU32)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU64
- REFRESH_INST 83 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetU64)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetObj
- REFRESH_INST 84 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetObj)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU8
- REFRESH_INST 85 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetU8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI8
- REFRESH_INST 86 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetI8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetU16
- REFRESH_INST 87 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetU16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIGetI16
- REFRESH_INST 88 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIGetI16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU32
- REFRESH_INST 89 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutU32)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU64
- REFRESH_INST 90 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutObj
- REFRESH_INST 91 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU8
- REFRESH_INST 92 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutU8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI8
- REFRESH_INST 93 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutI8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutU16
- REFRESH_INST 94 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutU16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpIPutI16
- REFRESH_INST 95 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpIPutI16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU32
- REFRESH_INST 96 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetU32)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU64
- REFRESH_INST 97 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetU64)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetObj
- REFRESH_INST 98 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetObj)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU8
- REFRESH_INST 99 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetU8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI8
- REFRESH_INST 100 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetI8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetU16
- REFRESH_INST 101 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetU16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSGetI16
- REFRESH_INST 102 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSGetI16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU32
- REFRESH_INST 103 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutU32)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU64
- REFRESH_INST 104 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutObj
- REFRESH_INST 105 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU8
- REFRESH_INST 106 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutU8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI8
- REFRESH_INST 107 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutI8)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutU16
- REFRESH_INST 108 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutU16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
- /*
- * General field read / write (iget-* iput-* sget-* sput-*).
- */
- .extern MterpSPutI16
- REFRESH_INST 109 # fix rINST to include opcode
- movq rPC, OUT_ARG0 # arg0: Instruction* inst
- movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
- movq rSELF, OUT_ARG3 # arg3: Thread* self
- call SYMBOL(MterpSPutI16)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 110
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtual)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 111
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeSuper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 112
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeDirect)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 113
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeStatic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 114
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeInterface)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 116
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 117
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeSuperRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 118
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeDirectRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 119
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeStaticRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 120
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeInterfaceRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- negl %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- notl %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- negq %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- notq %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- xorl $0x80000000, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
- movq $0x8000000000000000, %rsi
- xorq %rsi, %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
- /* int to long vA, vB */
- movzbq rINSTbl, %rax # rax <- +A
- sarl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- movslq VREG_ADDRESS(%rax), %rax
- SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2ssl VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2sdl VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2ssq VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2sdq VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movss VREG_ADDRESS(rINSTq), %xmm0
- movl $0x7fffffff, %eax
- cvtsi2ssl %eax, %xmm1
- comiss %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttss2sil %xmm0, %eax
- jmp 1f
-2:
- xorl %eax, %eax
-1:
- .if 0
- SET_WIDE_VREG %eax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movss VREG_ADDRESS(rINSTq), %xmm0
- movq $0x7fffffffffffffff, %rax
- cvtsi2ssq %rax, %xmm1
- comiss %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttss2siq %xmm0, %rax
- jmp 1f
-2:
- xorq %rax, %rax
-1:
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %rax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtss2sd VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movsd VREG_ADDRESS(rINSTq), %xmm0
- movl $0x7fffffff, %eax
- cvtsi2sdl %eax, %xmm1
- comisd %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttsd2sil %xmm0, %eax
- jmp 1f
-2:
- xorl %eax, %eax
-1:
- .if 0
- SET_WIDE_VREG %eax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movsd VREG_ADDRESS(rINSTq), %xmm0
- movq $0x7fffffffffffffff, %rax
- cvtsi2sdq %rax, %xmm1
- comisd %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttsd2siq %xmm0, %rax
- jmp 1f
-2:
- xorq %rax, %rax
-1:
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %rax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsd2ss VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movsbl %al, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movzwl %ax,%eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movswl %ax, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- addl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- subl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- imull (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 0
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %ecx, %rcx # ecx <- vCC
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %eax, rINSTq # eax <- vBB
- .else
- SET_VREG %eax, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 0
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %ecx, %rcx # ecx <- vCC
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %edx, rINSTq # eax <- vBB
- .else
- SET_VREG %edx, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- andl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- orl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- xorl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- addq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- subq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- imulq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 1
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %rcx, %rcx # ecx <- vCC
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rax, rINSTq # eax <- vBB
- .else
- SET_VREG %rax, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorq %rax, %rax
- .else
- negq %rax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 1
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %rcx, %rcx # ecx <- vCC
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rdx, rINSTq # eax <- vBB
- .else
- SET_VREG %rdx, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorq %rdx, %rdx
- .else
- negq %rdx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- andq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- orq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- xorq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- salq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- salq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sarq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sarq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- shrq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- shrq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- addss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- subss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- mulss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- divss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
- /* rem_float vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- BB
- movzbq 2(rPC), %rax # eax <- CC
- flds VREG_ADDRESS(%rcx) # vBB to fp stack
- flds VREG_ADDRESS(%rax) # vCC to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINSTq) # %st to vAA
- CLEAR_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- addsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- subsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- mulsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- divsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
- /* rem_double vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- BB
- movzbq 2(rPC), %rax # eax <- CC
- fldl VREG_ADDRESS(%rcx) # %st1 <- fp[vBB]
- fldl VREG_ADDRESS(%rax) # %st0 <- fp[vCC]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- addl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- subl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %rcx # eax <- vA
- imull (rFP,rINSTq,4), %eax
- SET_VREG %eax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %ecx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %ecx, %rcx # ecx <- vB
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %eax, rINSTq # vA <- result
- .else
- SET_VREG %eax, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %ecx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %ecx, %rcx # ecx <- vB
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %edx, rINSTq # vA <- result
- .else
- SET_VREG %edx, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- andl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- orl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- xorl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- addq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- subq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, %rcx # rax <- vA
- imulq (rFP,rINSTq,4), %rax
- SET_WIDE_VREG %rax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %rcx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %rcx, %rcx # ecx <- vB
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rax, rINSTq # vA <- result
- .else
- SET_VREG %rax, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 0
- xorq %rax, %rax
- .else
- negq %rax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %rcx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %rcx, %rcx # ecx <- vB
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rdx, rINSTq # vA <- result
- .else
- SET_VREG %rdx, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 1
- xorq %rdx, %rdx
- .else
- negq %rdx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- andq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- orq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- xorq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- salq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- salq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sarq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sarq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- shrq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- shrq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
- /* rem_float/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
- /* rem_double/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- addl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- subl %eax, %ecx # for example: addl %ecx, %eax
- SET_VREG %ecx, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- imull %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %eax, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %edx, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- orl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- xorl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- addl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- subl %eax, %ecx # ex: addl %ecx,%eax
- SET_VREG %ecx, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- imull %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %eax, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %edx, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- andl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- orl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- xorl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 1
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movswl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- .extern artIGetObjectFromMterp
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
- movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
- EXPORT_PC
- callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movl rINST, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwq 2(rPC), %rax # rax<- field byte offset
- leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
- andb $0xf, rINSTbl # rINST<- A
- GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
- movq %rax, (%rcx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 232
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 233
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 234
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualQuickRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movb rINSTbl, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movb rINSTbl, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movw rINSTw, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movw rINSTw, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movsbl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movsbl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movzwl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movswl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 250
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokePolymorphic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 251
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokePolymorphicRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 252
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeCustom)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 253
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeCustomRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
- .balign 128
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(0*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(1*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(2*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(3*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(4*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(5*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(6*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(7*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(8*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(9*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(10*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(11*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(12*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(13*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(14*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(15*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(16*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(17*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(18*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(19*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(20*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(21*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(22*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(23*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(24*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(25*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(26*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(27*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(28*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(29*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(30*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(31*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(32*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(33*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(34*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(35*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(36*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(37*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(38*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(39*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(40*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(41*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(42*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(43*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(44*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(45*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(46*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(47*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(48*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(49*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(50*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(51*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(52*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(53*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(54*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(55*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(56*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(57*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(58*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(59*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(60*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(61*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(62*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(63*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(64*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(65*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(66*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(67*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(68*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(69*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(70*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(71*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(72*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(73*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(74*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(75*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(76*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(77*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(78*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(79*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(80*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(81*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(82*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(83*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(84*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(85*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(86*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(87*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(88*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(89*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(90*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(91*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(92*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(93*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(94*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(95*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(96*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(97*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(98*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(99*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(100*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(101*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(102*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(103*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(104*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(105*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(106*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(107*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(108*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(109*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(110*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(111*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(112*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(113*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(114*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(115*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(116*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(117*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(118*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(119*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(120*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(121*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(122*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(123*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(124*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(125*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(126*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(127*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(128*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(129*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(130*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(131*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(132*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(133*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(134*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(135*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(136*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(137*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(138*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(139*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(140*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(141*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(142*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(143*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(144*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(145*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(146*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(147*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(148*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(149*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(150*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(151*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(152*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(153*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(154*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(155*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(156*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(157*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(158*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(159*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(160*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(161*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(162*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(163*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(164*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(165*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(166*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(167*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(168*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(169*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(170*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(171*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(172*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(173*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(174*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(175*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(176*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(177*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(178*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(179*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(180*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(181*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(182*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(183*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(184*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(185*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(186*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(187*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(188*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(189*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(190*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(191*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(192*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(193*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(194*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(195*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(196*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(197*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(198*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(199*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(200*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(201*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(202*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(203*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(204*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(205*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(206*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(207*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(208*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(209*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(210*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(211*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(212*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(213*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(214*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(215*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(216*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(217*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(218*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(219*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(220*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(221*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(222*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(223*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(224*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(225*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(226*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(227*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(228*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(229*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(230*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(231*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(232*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(233*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(234*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(235*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(236*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(237*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(238*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(239*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(240*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(241*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(242*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(243*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(244*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(245*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(246*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(247*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(248*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(249*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(250*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(251*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(252*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(253*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(254*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(255*128)
-
- .balign 128
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
- mov OFF_FP_DEX_PC(rFP), %ecx
- leaq (%rax, %rcx, 2), rPC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decl rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movq rSELF, %rax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
- REFRESH_IBASE_REG %rax
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpl $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpl $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movswl %ax, rPROFILE
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl $2, OUT_32_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpLogOSR)
-#endif
- movl $1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xorl %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $1, %eax
- jmp MterpDone
-MterpReturn:
- movq OFF_FP_RESULT_REGISTER(rFP), %rdx
- movq %rax, (%rdx)
- movl $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- testl rPROFILE, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addq $FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %r15
- POP %r14
- POP %r13
- POP %r12
- POP %rbp
- POP %rbx
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-